Diffstat (limited to 'lib')

 lib/Kconfig          |   9
 lib/Kconfig.debug    |  18
 lib/Makefile         |   8
 lib/bitmap.c         |   8
 lib/bust_spinlocks.c |  12
 lib/cmdline.c        |   8
 lib/devres.c         | 300
 lib/idr.c            |   4
 lib/iomap.c          |   3
 lib/kobject.c        |  78
 lib/sha1.c           |   9
 lib/sort.c           |   2
 lib/string.c         |   8
 lib/swiotlb.c        | 298
 lib/textsearch.c     |   2
 lib/vsprintf.c       |  15
 16 files changed, 616 insertions(+), 166 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 9b03581cdecb..384249915047 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -101,9 +101,14 @@ config TEXTSEARCH_FSM
 config PLIST
 	boolean
 
-config IOMAP_COPY
+config HAS_IOMEM
 	boolean
-	depends on !UML
+	depends on !NO_IOMEM
+	default y
+
+config HAS_IOPORT
+	boolean
+	depends on HAS_IOMEM && !NO_IOPORT
 	default y
 
 endmenu
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 5c2681875b9a..63f04c15e6f5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -77,6 +77,15 @@ config DEBUG_KERNEL
 	  Say Y here if you are developing drivers or trying to debug and
 	  identify kernel problems.
 
+config DEBUG_SHIRQ
+	bool "Debug shared IRQ handlers"
+	depends on DEBUG_KERNEL && GENERIC_HARDIRQS
+	help
+	  Enable this to generate a spurious interrupt as soon as a shared
+	  interrupt handler is registered, and just before one is deregistered.
+	  Drivers ought to be able to handle interrupts coming in at those
+	  points; some don't and need to be caught.
+
 config LOG_BUF_SHIFT
 	int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" if DEBUG_KERNEL
 	range 12 21
@@ -181,19 +190,11 @@ config DEBUG_MUTEXES
 	  This feature allows mutex semantics violations to be detected and
 	  reported.
 
-config DEBUG_RWSEMS
-	bool "RW-sem debugging: basic checks"
-	depends on DEBUG_KERNEL
-	help
-	  This feature allows read-write semaphore semantics violations to
-	  be detected and reported.
-
 config DEBUG_LOCK_ALLOC
 	bool "Lock debugging: detect incorrect freeing of live locks"
 	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
-	select DEBUG_RWSEMS
 	select LOCKDEP
 	help
 	  This feature will check whether any held lock (spinlock, rwlock,
@@ -209,7 +210,6 @@ config PROVE_LOCKING
 	select LOCKDEP
 	select DEBUG_SPINLOCK
 	select DEBUG_MUTEXES
-	select DEBUG_RWSEMS
 	select DEBUG_LOCK_ALLOC
 	default n
 	help
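The DEBUG_SHIRQ option added above exercises the shared-IRQ contract: a handler registered with IRQF_SHARED may be invoked at any point between request_irq() and free_irq(), including for interrupts its device never raised. A minimal sketch of a handler written to survive that (the device struct, register, and bit names are hypothetical, not from this patch):

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_dev *dev = dev_id;

	/* On a shared line this may run for another device's interrupt,
	 * or for the spurious one DEBUG_SHIRQ injects around handler
	 * (de)registration; claim it only if our hardware asserted it. */
	if (!(readl(dev->mmio + EXAMPLE_IRQ_STATUS) & EXAMPLE_IRQ_PENDING))
		return IRQ_NONE;

	/* ... acknowledge and handle the event ... */
	return IRQ_HANDLED;
}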
diff --git a/lib/Makefile b/lib/Makefile
index 77b4bad7d441..992a39ef9ffd 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -3,7 +3,7 @@
 #
 
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
-	 bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
+	 rbtree.o radix-tree.o dump_stack.o \
 	 idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
 	 sha1.o irq_regs.o reciprocal_div.o
 
@@ -12,14 +12,15 @@ lib-$(CONFIG_SMP) += cpumask.o
 
 lib-y	+= kobject.o kref.o kobject_uevent.o klist.o
 
-obj-y += sort.o parser.o halfmd4.o debug_locks.o random32.o
+obj-y += sort.o parser.o halfmd4.o debug_locks.o random32.o bust_spinlocks.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
 CFLAGS_kobject_uevent.o += -DDEBUG
 endif
 
-obj-$(CONFIG_IOMAP_COPY) += iomap_copy.o
+obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
+obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
@@ -41,7 +42,6 @@ obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
 obj-$(CONFIG_CRC16)	+= crc16.o
 obj-$(CONFIG_CRC32)	+= crc32.o
 obj-$(CONFIG_LIBCRC32C)	+= libcrc32c.o
-obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
 obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
 
 obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 037fa9aa2ed7..ee6e58fce8f7 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -95,7 +95,7 @@ void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits)
 }
 EXPORT_SYMBOL(__bitmap_complement);
 
-/*
+/**
  * __bitmap_shift_right - logical right shift of the bits in a bitmap
  *   @dst - destination bitmap
  *   @src - source bitmap
@@ -139,7 +139,7 @@ void __bitmap_shift_right(unsigned long *dst,
 EXPORT_SYMBOL(__bitmap_shift_right);
 
 
-/*
+/**
  * __bitmap_shift_left - logical left shift of the bits in a bitmap
  *   @dst - destination bitmap
  *   @src - source bitmap
@@ -529,7 +529,7 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
 }
 EXPORT_SYMBOL(bitmap_parselist);
 
-/*
+/**
  * bitmap_pos_to_ord(buf, pos, bits)
  *	@buf: pointer to a bitmap
  *	@pos: a bit position in @buf (0 <= @pos < @bits)
@@ -804,7 +804,7 @@ EXPORT_SYMBOL(bitmap_find_free_region);
  * @pos: beginning of bit region to release
  * @order: region size (log base 2 of number of bits) to release
  *
- * This is the complement to __bitmap_find_free_region and releases
+ * This is the complement to __bitmap_find_free_region() and releases
  * the found region (by clearing it in the bitmap).
  *
  * No return value.
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index a2055bc3ef62..accb35658169 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -14,24 +14,16 @@
 #include <linux/vt_kern.h>
 
 
-void bust_spinlocks(int yes)
+void __attribute__((weak)) bust_spinlocks(int yes)
 {
 	if (yes) {
 		oops_in_progress = 1;
 	} else {
-		int loglevel_save = console_loglevel;
 #ifdef CONFIG_VT
 		unblank_screen();
 #endif
 		oops_in_progress = 0;
-		/*
-		 * OK, the message is on the console.  Now we call printk()
-		 * without oops_in_progress set so that printk() will give klogd
-		 * and the blanked console a poke.  Hold onto your hats...
-		 */
-		console_loglevel = 15;		/* NMI oopser may have shut the console up */
-		printk(" ");
-		console_loglevel = loglevel_save;
+		wake_up_klogd();
 	}
 }
 
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 8a5b5303bd4f..f596c08d213a 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -43,10 +43,10 @@ static int get_range(char **str, int *pint)
  *	comma as well.
  *
  *	Return values:
- *	0 : no int in string
- *	1 : int found, no subsequent comma
- *	2 : int found including a subsequent comma
- *	3 : hyphen found to denote a range
+ *	0 - no int in string
+ *	1 - int found, no subsequent comma
+ *	2 - int found including a subsequent comma
+ *	3 - hyphen found to denote a range
  */
 
 int get_option (char **str, int *pint)
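For reference, get_option() consumes one integer from a comma-separated boot-option string, and its return value encodes the delimiter that followed, as listed above. A hedged usage sketch (the option name and storage are hypothetical):

static int example_vals[8];

static int __init example_setup(char *str)
{
	int i = 0;

	/* for "example=5,9,12" the calls return 2, 2, then 1 at end of
	 * string; 0 means no integer at the current position, and 3
	 * flags a hyphen introducing a range */
	while (i < 8 && get_option(&str, &example_vals[i]) != 0)
		i++;
	return 1;
}
__setup("example=", example_setup);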
diff --git a/lib/devres.c b/lib/devres.c
new file mode 100644
index 000000000000..2a668dd7cac7
--- /dev/null
+++ b/lib/devres.c
@@ -0,0 +1,300 @@
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+static void devm_ioremap_release(struct device *dev, void *res)
+{
+	iounmap(*(void __iomem **)res);
+}
+
+static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
+{
+	return *(void **)res == match_data;
+}
+
+/**
+ * devm_ioremap - Managed ioremap()
+ * @dev: Generic device to remap IO address for
+ * @offset: BUS offset to map
+ * @size: Size of map
+ *
+ * Managed ioremap().  Map is automatically unmapped on driver detach.
+ */
+void __iomem *devm_ioremap(struct device *dev, unsigned long offset,
+			   unsigned long size)
+{
+	void __iomem **ptr, *addr;
+
+	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	addr = ioremap(offset, size);
+	if (addr) {
+		*ptr = addr;
+		devres_add(dev, ptr);
+	} else
+		devres_free(ptr);
+
+	return addr;
+}
+EXPORT_SYMBOL(devm_ioremap);
+
+/**
+ * devm_ioremap_nocache - Managed ioremap_nocache()
+ * @dev: Generic device to remap IO address for
+ * @offset: BUS offset to map
+ * @size: Size of map
+ *
+ * Managed ioremap_nocache().  Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset,
+				   unsigned long size)
+{
+	void __iomem **ptr, *addr;
+
+	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	addr = ioremap_nocache(offset, size);
+	if (addr) {
+		*ptr = addr;
+		devres_add(dev, ptr);
+	} else
+		devres_free(ptr);
+
+	return addr;
+}
+EXPORT_SYMBOL(devm_ioremap_nocache);
+
+/**
+ * devm_iounmap - Managed iounmap()
+ * @dev: Generic device to unmap for
+ * @addr: Address to unmap
+ *
+ * Managed iounmap().  @addr must have been mapped using devm_ioremap*().
+ */
+void devm_iounmap(struct device *dev, void __iomem *addr)
+{
+	iounmap(addr);
+	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
+			       (void *)addr));
+}
+EXPORT_SYMBOL(devm_iounmap);
+
+#ifdef CONFIG_HAS_IOPORT
+/*
+ * Generic iomap devres
+ */
+static void devm_ioport_map_release(struct device *dev, void *res)
+{
+	ioport_unmap(*(void __iomem **)res);
+}
+
+static int devm_ioport_map_match(struct device *dev, void *res,
+				 void *match_data)
+{
+	return *(void **)res == match_data;
+}
+
+/**
+ * devm_ioport_map - Managed ioport_map()
+ * @dev: Generic device to map ioport for
+ * @port: Port to map
+ * @nr: Number of ports to map
+ *
+ * Managed ioport_map().  Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
+			       unsigned int nr)
+{
+	void __iomem **ptr, *addr;
+
+	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	addr = ioport_map(port, nr);
+	if (addr) {
+		*ptr = addr;
+		devres_add(dev, ptr);
+	} else
+		devres_free(ptr);
+
+	return addr;
+}
+EXPORT_SYMBOL(devm_ioport_map);
+
+/**
+ * devm_ioport_unmap - Managed ioport_unmap()
+ * @dev: Generic device to unmap for
+ * @addr: Address to unmap
+ *
+ * Managed ioport_unmap().  @addr must have been mapped using
+ * devm_ioport_map().
+ */
+void devm_ioport_unmap(struct device *dev, void __iomem *addr)
+{
+	ioport_unmap(addr);
+	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
+			       devm_ioport_map_match, (void *)addr));
+}
+EXPORT_SYMBOL(devm_ioport_unmap);
+
+#ifdef CONFIG_PCI
+/*
+ * PCI iomap devres
+ */
+#define PCIM_IOMAP_MAX	PCI_ROM_RESOURCE
+
+struct pcim_iomap_devres {
+	void __iomem *table[PCIM_IOMAP_MAX];
+};
+
+static void pcim_iomap_release(struct device *gendev, void *res)
+{
+	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
+	struct pcim_iomap_devres *this = res;
+	int i;
+
+	for (i = 0; i < PCIM_IOMAP_MAX; i++)
+		if (this->table[i])
+			pci_iounmap(dev, this->table[i]);
+}
+
+/**
+ * pcim_iomap_table - access iomap allocation table
+ * @pdev: PCI device to access iomap table for
+ *
+ * Access iomap allocation table for @dev.  If iomap table doesn't
+ * exist and @pdev is managed, it will be allocated.  All iomaps
+ * recorded in the iomap table are automatically unmapped on driver
+ * detach.
+ *
+ * This function might sleep when the table is first allocated but can
+ * be safely called without context and guaranteed to succeed once
+ * allocated.
+ */
+void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
+{
+	struct pcim_iomap_devres *dr, *new_dr;
+
+	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
+	if (dr)
+		return dr->table;
+
+	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
+	if (!new_dr)
+		return NULL;
+	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
+	return dr->table;
+}
+EXPORT_SYMBOL(pcim_iomap_table);
+
+/**
+ * pcim_iomap - Managed pci_iomap()
+ * @pdev: PCI device to iomap for
+ * @bar: BAR to iomap
+ * @maxlen: Maximum length of iomap
+ *
+ * Managed pci_iomap().  Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
+{
+	void __iomem **tbl;
+
+	BUG_ON(bar >= PCIM_IOMAP_MAX);
+
+	tbl = (void __iomem **)pcim_iomap_table(pdev);
+	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
+		return NULL;
+
+	tbl[bar] = pci_iomap(pdev, bar, maxlen);
+	return tbl[bar];
+}
+EXPORT_SYMBOL(pcim_iomap);
+
+/**
+ * pcim_iounmap - Managed pci_iounmap()
+ * @pdev: PCI device to iounmap for
+ * @addr: Address to unmap
+ *
+ * Managed pci_iounmap().  @addr must have been mapped using pcim_iomap().
+ */
+void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
+{
+	void __iomem **tbl;
+	int i;
+
+	pci_iounmap(pdev, addr);
+
+	tbl = (void __iomem **)pcim_iomap_table(pdev);
+	BUG_ON(!tbl);
+
+	for (i = 0; i < PCIM_IOMAP_MAX; i++)
+		if (tbl[i] == addr) {
+			tbl[i] = NULL;
+			return;
+		}
+	WARN_ON(1);
+}
+EXPORT_SYMBOL(pcim_iounmap);
+
+/**
+ * pcim_iomap_regions - Request and iomap PCI BARs
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to request and iomap
+ * @name: Name used when requesting regions
+ *
+ * Request and iomap regions specified by @mask.
+ */
+int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name)
+{
+	void __iomem * const *iomap;
+	int i, rc;
+
+	iomap = pcim_iomap_table(pdev);
+	if (!iomap)
+		return -ENOMEM;
+
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+		unsigned long len;
+
+		if (!(mask & (1 << i)))
+			continue;
+
+		rc = -EINVAL;
+		len = pci_resource_len(pdev, i);
+		if (!len)
+			goto err_inval;
+
+		rc = pci_request_region(pdev, i, name);
+		if (rc)
+			goto err_region;
+
+		rc = -ENOMEM;
+		if (!pcim_iomap(pdev, i, 0))
+			goto err_iomap;
+	}
+
+	return 0;
+
+ err_iomap:
+	pcim_iounmap(pdev, iomap[i]);
+ err_region:
+	pci_release_region(pdev, i);
+ err_inval:
+	while (--i >= 0) {
+		pcim_iounmap(pdev, iomap[i]);
+		pci_release_region(pdev, i);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(pcim_iomap_regions);
+#endif
+#endif
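A hedged usage sketch of the managed I/O-map API added above, in the shape of a hypothetical PCI driver probe (the driver name and BAR choice are illustrative, not from this patch): once pcim_iomap_regions() succeeds, neither the error path nor the remove path needs an explicit unmap, because pcim_iomap_release() runs automatically on driver detach.

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	/* request + iomap BAR 0; both are released on driver detach */
	rc = pcim_iomap_regions(pdev, 1 << 0, "example");
	if (rc)
		return rc;

	iomap = pcim_iomap_table(pdev);
	/* iomap[0] now points at BAR 0; no pci_iounmap()/region release
	 * is needed here or in the remove path */
	return 0;
}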
diff --git a/lib/idr.c b/lib/idr.c
index 71853531d3b0..305117ca2d41 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -329,8 +329,8 @@ static void sub_remove(struct idr *idp, int shift, int id)
 
 /**
  * idr_remove - remove the given id and free it's slot
- * idp: idr handle
- * id: uniqueue key
+ * @idp: idr handle
+ * @id: unique key
  */
 void idr_remove(struct idr *idp, int id)
 {
diff --git a/lib/iomap.c b/lib/iomap.c
index d6ccdd85df53..4d43f37c0154 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -4,8 +4,9 @@
  * (C) Copyright 2004 Linus Torvalds
  */
 #include <linux/pci.h>
+#include <linux/io.h>
+
 #include <linux/module.h>
-#include <asm/io.h>
 
 /*
  * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
diff --git a/lib/kobject.c b/lib/kobject.c
index 7ce6dc138e90..2782f49e906e 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -44,11 +44,11 @@ static int populate_dir(struct kobject * kobj)
 	return error;
 }
 
-static int create_dir(struct kobject * kobj)
+static int create_dir(struct kobject * kobj, struct dentry *shadow_parent)
 {
 	int error = 0;
 	if (kobject_name(kobj)) {
-		error = sysfs_create_dir(kobj);
+		error = sysfs_create_dir(kobj, shadow_parent);
 		if (!error) {
 			if ((error = populate_dir(kobj)))
 				sysfs_remove_dir(kobj);
@@ -97,11 +97,12 @@ static void fill_kobj_path(struct kobject *kobj, char *path, int length)
 }
 
 /**
- * kobject_get_path - generate and return the path associated with a given kobj
- * and kset pair.  The result must be freed by the caller with kfree().
+ * kobject_get_path - generate and return the path associated with a given kobj and kset pair.
  *
  * @kobj:	kobject in question, with which to build the path
  * @gfp_mask:	the allocation type used to allocate the path
+ *
+ * The result must be freed by the caller with kfree().
  */
 char *kobject_get_path(struct kobject *kobj, gfp_t gfp_mask)
 {
@@ -126,6 +127,8 @@ EXPORT_SYMBOL_GPL(kobject_get_path);
  */
 void kobject_init(struct kobject * kobj)
 {
+	if (!kobj)
+		return;
 	kref_init(&kobj->kref);
 	INIT_LIST_HEAD(&kobj->entry);
 	init_waitqueue_head(&kobj->poll);
@@ -156,9 +159,10 @@ static void unlink(struct kobject * kobj)
 /**
  *	kobject_add - add an object to the hierarchy.
  *	@kobj:	object.
+ *	@shadow_parent: sysfs directory to add to.
  */
 
-int kobject_add(struct kobject * kobj)
+int kobject_shadow_add(struct kobject * kobj, struct dentry *shadow_parent)
 {
 	int error = 0;
 	struct kobject * parent;
@@ -189,12 +193,11 @@ int kobject_add(struct kobject * kobj)
 	}
 	kobj->parent = parent;
 
-	error = create_dir(kobj);
+	error = create_dir(kobj, shadow_parent);
 	if (error) {
 		/* unlink does the kobject_put() for us */
 		unlink(kobj);
-		if (parent)
-			kobject_put(parent);
+		kobject_put(parent);
 
 		/* be noisy on error issues */
 		if (error == -EEXIST)
@@ -211,6 +214,15 @@ int kobject_add(struct kobject * kobj)
 	return error;
 }
 
+/**
+ *	kobject_add - add an object to the hierarchy.
+ *	@kobj:	object.
+ */
+int kobject_add(struct kobject * kobj)
+{
+	return kobject_shadow_add(kobj, NULL);
+}
+
 
 /**
  *	kobject_register - initialize and add an object.
@@ -303,7 +315,29 @@ int kobject_rename(struct kobject * kobj, const char *new_name)
 	kobj = kobject_get(kobj);
 	if (!kobj)
 		return -EINVAL;
-	error = sysfs_rename_dir(kobj, new_name);
+	if (!kobj->parent)
+		return -EINVAL;
+	error = sysfs_rename_dir(kobj, kobj->parent->dentry, new_name);
+	kobject_put(kobj);
+
+	return error;
+}
+
+/**
+ *	kobject_shadow_rename - change the name of an object in a shadow directory
+ *	@kobj:	object in question.
+ *	@new_name: object's new name
+ */
+
+int kobject_shadow_rename(struct kobject * kobj, struct dentry *new_parent,
+			  const char *new_name)
+{
+	int error = 0;
+
+	kobj = kobject_get(kobj);
+	if (!kobj)
+		return -EINVAL;
+	error = sysfs_rename_dir(kobj, new_parent, new_name);
 	kobject_put(kobj);
 
 	return error;
@@ -312,7 +346,7 @@ int kobject_rename(struct kobject * kobj, const char *new_name)
 /**
  *	kobject_move - move object to another parent
  *	@kobj:	object in question.
- *	@new_parent: object's new parent
+ *	@new_parent: object's new parent (can be NULL)
  */
 
 int kobject_move(struct kobject *kobj, struct kobject *new_parent)
@@ -328,8 +362,8 @@ int kobject_move(struct kobject *kobj, struct kobject *new_parent)
 		return -EINVAL;
 	new_parent = kobject_get(new_parent);
 	if (!new_parent) {
-		error = -EINVAL;
-		goto out;
+		if (kobj->kset)
+			new_parent = kobject_get(&kobj->kset->kobj);
 	}
 	/* old object path */
 	devpath = kobject_get_path(kobj, GFP_KERNEL);
@@ -366,6 +400,8 @@ out:
 
 void kobject_del(struct kobject * kobj)
 {
+	if (!kobj)
+		return;
 	sysfs_remove_dir(kobj);
 	unlink(kobj);
 }
@@ -377,6 +413,8 @@ void kobject_del(struct kobject * kobj)
 
 void kobject_unregister(struct kobject * kobj)
 {
+	if (!kobj)
+		return;
 	pr_debug("kobject %s: unregistering\n",kobject_name(kobj));
 	kobject_uevent(kobj, KOBJ_REMOVE);
 	kobject_del(kobj);
@@ -414,8 +452,7 @@ void kobject_cleanup(struct kobject * kobj)
 		t->release(kobj);
 	if (s)
 		kset_put(s);
-	if (parent)
-		kobject_put(parent);
+	kobject_put(parent);
 }
 
 static void kobject_release(struct kref *kref)
@@ -523,6 +560,8 @@ int kset_add(struct kset * k)
 
 int kset_register(struct kset * k)
 {
+	if (!k)
+		return -EINVAL;
 	kset_init(k);
 	return kset_add(k);
 }
@@ -535,6 +574,8 @@ int kset_register(struct kset * k)
 
 void kset_unregister(struct kset * k)
 {
+	if (!k)
+		return;
 	kobject_unregister(&k->kobj);
 }
 
@@ -586,6 +627,9 @@ int subsystem_register(struct subsystem * s)
 {
 	int error;
 
+	if (!s)
+		return -EINVAL;
+
 	subsystem_init(s);
 	pr_debug("subsystem %s: registering\n",s->kset.kobj.name);
 
@@ -598,6 +642,8 @@ int subsystem_register(struct subsystem * s)
 
 void subsystem_unregister(struct subsystem * s)
 {
+	if (!s)
+		return;
 	pr_debug("subsystem %s: unregistering\n",s->kset.kobj.name);
 	kset_unregister(&s->kset);
 }
@@ -612,6 +658,10 @@ void subsystem_unregister(struct subsystem * s)
 int subsys_create_file(struct subsystem * s, struct subsys_attribute * a)
 {
 	int error = 0;
+
+	if (!s || !a)
+		return -EINVAL;
+
 	if (subsys_get(s)) {
 		error = sysfs_create_file(&s->kset.kobj,&a->attr);
 		subsys_put(s);
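The NULL checks added throughout kobject.c above make the teardown entry points safe to call unconditionally; a hedged sketch of the cleanup pattern they enable (the object is hypothetical, not from this patch):

static struct kobject *example_kobj;

static void example_exit(void)
{
	/* safe even if registration failed or never happened:
	 * kobject_unregister(NULL) is now a quiet no-op */
	kobject_unregister(example_kobj);
	example_kobj = NULL;
}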
diff --git a/lib/sha1.c b/lib/sha1.c
index 1cdabe3065f9..4c45fd50e913 100644
--- a/lib/sha1.c
+++ b/lib/sha1.c
@@ -20,8 +20,8 @@
 #define K3  0x8F1BBCDCL			/* Rounds 40-59: sqrt(5) * 2^30 */
 #define K4  0xCA62C1D6L			/* Rounds 60-79: sqrt(10) * 2^30 */
 
-/*
- * sha_transform: single block SHA1 transform
+/**
+ * sha_transform - single block SHA1 transform
  *
  * @digest: 160 bit digest to update
  * @data:   512 bits of data to hash
@@ -80,9 +80,8 @@ void sha_transform(__u32 *digest, const char *in, __u32 *W)
 }
 EXPORT_SYMBOL(sha_transform);
 
-/*
- * sha_init: initialize the vectors for a SHA1 digest
- *
+/**
+ * sha_init - initialize the vectors for a SHA1 digest
  * @buf: vector to initialize
  */
 void sha_init(__u32 *buf)
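sha_init() and sha_transform() above operate on one 64-byte block at a time with caller-provided scratch space; a minimal sketch of driving them directly (illustrative only: a real caller must handle message padding and length encoding itself):

#include <linux/cryptohash.h>

static void example_hash_one_block(const char block[64],
				   __u32 digest[SHA_DIGEST_WORDS])
{
	__u32 workspace[SHA_WORKSPACE_WORDS];	/* 80-word scratch */

	sha_init(digest);			/* load the five IV words */
	sha_transform(digest, block, workspace);
	/* feed each further 64-byte block through sha_transform() */
}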
diff --git a/lib/sort.c b/lib/sort.c
index 488788b341cb..961567894d16 100644
--- a/lib/sort.c
+++ b/lib/sort.c
@@ -27,7 +27,7 @@ static void generic_swap(void *a, void *b, int size)
 	} while (--size > 0);
 }
 
-/*
+/**
  * sort - sort an array of elements
  * @base: pointer to data to sort
  * @num: number of elements
diff --git a/lib/string.c b/lib/string.c
index a485d75962af..bab440fb0dfc 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -160,7 +160,7 @@ EXPORT_SYMBOL(strcat);
  * @src: The string to append to it
  * @count: The maximum numbers of bytes to copy
  *
- * Note that in contrast to strncpy, strncat ensures the result is
+ * Note that in contrast to strncpy(), strncat() ensures the result is
  * terminated.
  */
 char *strncat(char *dest, const char *src, size_t count)
@@ -366,8 +366,7 @@ EXPORT_SYMBOL(strnlen);
 
 #ifndef __HAVE_ARCH_STRSPN
 /**
- * strspn - Calculate the length of the initial substring of @s which only
- *	contain letters in @accept
+ * strspn - Calculate the length of the initial substring of @s which only contain letters in @accept
  * @s: The string to be searched
  * @accept: The string to search for
  */
@@ -394,8 +393,7 @@ EXPORT_SYMBOL(strspn);
 
 #ifndef __HAVE_ARCH_STRCSPN
 /**
- * strcspn - Calculate the length of the initial substring of @s which does
- *	not contain letters in @reject
+ * strcspn - Calculate the length of the initial substring of @s which does not contain letters in @reject
  * @s: The string to be searched
  * @reject: The string to avoid
  */
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 10625785eefd..623a68af8b18 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Dynamic DMA mapping support. 2 * Dynamic DMA mapping support.
3 * 3 *
4 * This implementation is for IA-64 and EM64T platforms that do not support 4 * This implementation is a fallback for platforms that do not support
5 * I/O TLBs (aka DMA address translation hardware). 5 * I/O TLBs (aka DMA address translation hardware).
6 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com> 6 * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
7 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com> 7 * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
@@ -28,6 +28,7 @@
28#include <asm/io.h> 28#include <asm/io.h>
29#include <asm/dma.h> 29#include <asm/dma.h>
30#include <asm/scatterlist.h> 30#include <asm/scatterlist.h>
31#include <asm/swiotlb.h>
31 32
32#include <linux/init.h> 33#include <linux/init.h>
33#include <linux/bootmem.h> 34#include <linux/bootmem.h>
@@ -35,8 +36,10 @@
35#define OFFSET(val,align) ((unsigned long) \ 36#define OFFSET(val,align) ((unsigned long) \
36 ( (val) & ( (align) - 1))) 37 ( (val) & ( (align) - 1)))
37 38
39#ifndef SG_ENT_VIRT_ADDRESS
38#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset) 40#define SG_ENT_VIRT_ADDRESS(sg) (page_address((sg)->page) + (sg)->offset)
39#define SG_ENT_PHYS_ADDRESS(SG) virt_to_phys(SG_ENT_VIRT_ADDRESS(SG)) 41#define SG_ENT_PHYS_ADDRESS(sg) virt_to_bus(SG_ENT_VIRT_ADDRESS(sg))
42#endif
40 43
41/* 44/*
42 * Maximum allowable number of contiguous slabs to map, 45 * Maximum allowable number of contiguous slabs to map,
@@ -101,13 +104,25 @@ static unsigned int io_tlb_index;
101 * We need to save away the original address corresponding to a mapped entry 104 * We need to save away the original address corresponding to a mapped entry
102 * for the sync operations. 105 * for the sync operations.
103 */ 106 */
104static unsigned char **io_tlb_orig_addr; 107#ifndef SWIOTLB_ARCH_HAS_IO_TLB_ADDR_T
108typedef char *io_tlb_addr_t;
109#define swiotlb_orig_addr_null(buffer) (!(buffer))
110#define ptr_to_io_tlb_addr(ptr) (ptr)
111#define page_to_io_tlb_addr(pg, off) (page_address(pg) + (off))
112#define sg_to_io_tlb_addr(sg) SG_ENT_VIRT_ADDRESS(sg)
113#endif
114static io_tlb_addr_t *io_tlb_orig_addr;
105 115
106/* 116/*
107 * Protect the above data structures in the map and unmap calls 117 * Protect the above data structures in the map and unmap calls
108 */ 118 */
109static DEFINE_SPINLOCK(io_tlb_lock); 119static DEFINE_SPINLOCK(io_tlb_lock);
110 120
121#ifdef SWIOTLB_EXTRA_VARIABLES
122SWIOTLB_EXTRA_VARIABLES;
123#endif
124
125#ifndef SWIOTLB_ARCH_HAS_SETUP_IO_TLB_NPAGES
111static int __init 126static int __init
112setup_io_tlb_npages(char *str) 127setup_io_tlb_npages(char *str)
113{ 128{
@@ -122,30 +137,50 @@ setup_io_tlb_npages(char *str)
122 swiotlb_force = 1; 137 swiotlb_force = 1;
123 return 1; 138 return 1;
124} 139}
140#endif
125__setup("swiotlb=", setup_io_tlb_npages); 141__setup("swiotlb=", setup_io_tlb_npages);
126/* make io_tlb_overflow tunable too? */ 142/* make io_tlb_overflow tunable too? */
127 143
144#ifndef swiotlb_adjust_size
145#define swiotlb_adjust_size(size) ((void)0)
146#endif
147
148#ifndef swiotlb_adjust_seg
149#define swiotlb_adjust_seg(start, size) ((void)0)
150#endif
151
152#ifndef swiotlb_print_info
153#define swiotlb_print_info(bytes) \
154 printk(KERN_INFO "Placing %luMB software IO TLB between 0x%lx - " \
155 "0x%lx\n", bytes >> 20, \
156 virt_to_bus(io_tlb_start), virt_to_bus(io_tlb_end))
157#endif
158
128/* 159/*
129 * Statically reserve bounce buffer space and initialize bounce buffer data 160 * Statically reserve bounce buffer space and initialize bounce buffer data
130 * structures for the software IO TLB used to implement the DMA API. 161 * structures for the software IO TLB used to implement the DMA API.
131 */ 162 */
132void 163void __init
133swiotlb_init_with_default_size (size_t default_size) 164swiotlb_init_with_default_size(size_t default_size)
134{ 165{
135 unsigned long i; 166 unsigned long i, bytes;
136 167
137 if (!io_tlb_nslabs) { 168 if (!io_tlb_nslabs) {
138 io_tlb_nslabs = (default_size >> IO_TLB_SHIFT); 169 io_tlb_nslabs = (default_size >> IO_TLB_SHIFT);
139 io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE); 170 io_tlb_nslabs = ALIGN(io_tlb_nslabs, IO_TLB_SEGSIZE);
140 } 171 }
172 swiotlb_adjust_size(io_tlb_nslabs);
173 swiotlb_adjust_size(io_tlb_overflow);
174
175 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
141 176
142 /* 177 /*
143 * Get IO TLB memory from the low pages 178 * Get IO TLB memory from the low pages
144 */ 179 */
145 io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT)); 180 io_tlb_start = alloc_bootmem_low_pages(bytes);
146 if (!io_tlb_start) 181 if (!io_tlb_start)
147 panic("Cannot allocate SWIOTLB buffer"); 182 panic("Cannot allocate SWIOTLB buffer");
148 io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT); 183 io_tlb_end = io_tlb_start + bytes;
149 184
150 /* 185 /*
151 * Allocate and initialize the free list array. This array is used 186 * Allocate and initialize the free list array. This array is used
@@ -153,34 +188,45 @@ swiotlb_init_with_default_size (size_t default_size)
153 * between io_tlb_start and io_tlb_end. 188 * between io_tlb_start and io_tlb_end.
154 */ 189 */
155 io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int)); 190 io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
156 for (i = 0; i < io_tlb_nslabs; i++) 191 for (i = 0; i < io_tlb_nslabs; i++) {
192 if ( !(i % IO_TLB_SEGSIZE) )
193 swiotlb_adjust_seg(io_tlb_start + (i << IO_TLB_SHIFT),
194 IO_TLB_SEGSIZE << IO_TLB_SHIFT);
157 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); 195 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
196 }
158 io_tlb_index = 0; 197 io_tlb_index = 0;
159 io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(char *)); 198 io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(io_tlb_addr_t));
160 199
161 /* 200 /*
162 * Get the overflow emergency buffer 201 * Get the overflow emergency buffer
163 */ 202 */
164 io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); 203 io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
165 printk(KERN_INFO "Placing software IO TLB between 0x%lx - 0x%lx\n", 204 if (!io_tlb_overflow_buffer)
166 virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end)); 205 panic("Cannot allocate SWIOTLB overflow buffer!\n");
206 swiotlb_adjust_seg(io_tlb_overflow_buffer, io_tlb_overflow);
207
208 swiotlb_print_info(bytes);
167} 209}
210#ifndef __swiotlb_init_with_default_size
211#define __swiotlb_init_with_default_size swiotlb_init_with_default_size
212#endif
168 213
169void 214void __init
170swiotlb_init (void) 215swiotlb_init(void)
171{ 216{
172 swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */ 217 __swiotlb_init_with_default_size(64 * (1<<20)); /* default to 64MB */
173} 218}
174 219
220#ifdef SWIOTLB_ARCH_NEED_LATE_INIT
175/* 221/*
176 * Systems with larger DMA zones (those that don't support ISA) can 222 * Systems with larger DMA zones (those that don't support ISA) can
177 * initialize the swiotlb later using the slab allocator if needed. 223 * initialize the swiotlb later using the slab allocator if needed.
178 * This should be just like above, but with some error catching. 224 * This should be just like above, but with some error catching.
179 */ 225 */
180int 226int
181swiotlb_late_init_with_default_size (size_t default_size) 227swiotlb_late_init_with_default_size(size_t default_size)
182{ 228{
183 unsigned long i, req_nslabs = io_tlb_nslabs; 229 unsigned long i, bytes, req_nslabs = io_tlb_nslabs;
184 unsigned int order; 230 unsigned int order;
185 231
186 if (!io_tlb_nslabs) { 232 if (!io_tlb_nslabs) {
@@ -191,8 +237,9 @@ swiotlb_late_init_with_default_size (size_t default_size)
191 /* 237 /*
192 * Get IO TLB memory from the low pages 238 * Get IO TLB memory from the low pages
193 */ 239 */
194 order = get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT)); 240 order = get_order(io_tlb_nslabs << IO_TLB_SHIFT);
195 io_tlb_nslabs = SLABS_PER_PAGE << order; 241 io_tlb_nslabs = SLABS_PER_PAGE << order;
242 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
196 243
197 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) { 244 while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
198 io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN, 245 io_tlb_start = (char *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
@@ -205,13 +252,14 @@ swiotlb_late_init_with_default_size (size_t default_size)
205 if (!io_tlb_start) 252 if (!io_tlb_start)
206 goto cleanup1; 253 goto cleanup1;
207 254
208 if (order != get_order(io_tlb_nslabs * (1 << IO_TLB_SHIFT))) { 255 if (order != get_order(bytes)) {
209 printk(KERN_WARNING "Warning: only able to allocate %ld MB " 256 printk(KERN_WARNING "Warning: only able to allocate %ld MB "
210 "for software IO TLB\n", (PAGE_SIZE << order) >> 20); 257 "for software IO TLB\n", (PAGE_SIZE << order) >> 20);
211 io_tlb_nslabs = SLABS_PER_PAGE << order; 258 io_tlb_nslabs = SLABS_PER_PAGE << order;
259 bytes = io_tlb_nslabs << IO_TLB_SHIFT;
212 } 260 }
213 io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT); 261 io_tlb_end = io_tlb_start + bytes;
214 memset(io_tlb_start, 0, io_tlb_nslabs * (1 << IO_TLB_SHIFT)); 262 memset(io_tlb_start, 0, bytes);
215 263
216 /* 264 /*
217 * Allocate and initialize the free list array. This array is used 265 * Allocate and initialize the free list array. This array is used
@@ -227,12 +275,12 @@ swiotlb_late_init_with_default_size (size_t default_size)
227 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE); 275 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
228 io_tlb_index = 0; 276 io_tlb_index = 0;
229 277
230 io_tlb_orig_addr = (unsigned char **)__get_free_pages(GFP_KERNEL, 278 io_tlb_orig_addr = (io_tlb_addr_t *)__get_free_pages(GFP_KERNEL,
231 get_order(io_tlb_nslabs * sizeof(char *))); 279 get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
232 if (!io_tlb_orig_addr) 280 if (!io_tlb_orig_addr)
233 goto cleanup3; 281 goto cleanup3;
234 282
235 memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(char *)); 283 memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(io_tlb_addr_t));
236 284
237 /* 285 /*
238 * Get the overflow emergency buffer 286 * Get the overflow emergency buffer
@@ -242,30 +290,30 @@ swiotlb_late_init_with_default_size (size_t default_size)
242 if (!io_tlb_overflow_buffer) 290 if (!io_tlb_overflow_buffer)
243 goto cleanup4; 291 goto cleanup4;
244 292
245 printk(KERN_INFO "Placing %ldMB software IO TLB between 0x%lx - " 293 swiotlb_print_info(bytes);
246 "0x%lx\n", (io_tlb_nslabs * (1 << IO_TLB_SHIFT)) >> 20,
247 virt_to_phys(io_tlb_start), virt_to_phys(io_tlb_end));
248 294
249 return 0; 295 return 0;
250 296
251cleanup4: 297cleanup4:
252 free_pages((unsigned long)io_tlb_orig_addr, get_order(io_tlb_nslabs * 298 free_pages((unsigned long)io_tlb_orig_addr,
253 sizeof(char *))); 299 get_order(io_tlb_nslabs * sizeof(io_tlb_addr_t)));
254 io_tlb_orig_addr = NULL; 300 io_tlb_orig_addr = NULL;
255cleanup3: 301cleanup3:
256 free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs * 302 free_pages((unsigned long)io_tlb_list,
257 sizeof(int))); 303 get_order(io_tlb_nslabs * sizeof(int)));
258 io_tlb_list = NULL; 304 io_tlb_list = NULL;
259 io_tlb_end = NULL;
260cleanup2: 305cleanup2:
306 io_tlb_end = NULL;
261 free_pages((unsigned long)io_tlb_start, order); 307 free_pages((unsigned long)io_tlb_start, order);
262 io_tlb_start = NULL; 308 io_tlb_start = NULL;
263cleanup1: 309cleanup1:
264 io_tlb_nslabs = req_nslabs; 310 io_tlb_nslabs = req_nslabs;
265 return -ENOMEM; 311 return -ENOMEM;
266} 312}
313#endif
267 314
268static inline int 315#ifndef SWIOTLB_ARCH_HAS_NEEDS_MAPPING
316static int
269address_needs_mapping(struct device *hwdev, dma_addr_t addr) 317address_needs_mapping(struct device *hwdev, dma_addr_t addr)
270{ 318{
271 dma_addr_t mask = 0xffffffff; 319 dma_addr_t mask = 0xffffffff;
@@ -275,11 +323,35 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr)
275 return (addr & ~mask) != 0; 323 return (addr & ~mask) != 0;
276} 324}
277 325
326static inline int range_needs_mapping(const void *ptr, size_t size)
327{
328 return swiotlb_force;
329}
330
331static inline int order_needs_mapping(unsigned int order)
332{
333 return 0;
334}
335#endif
336
337static void
338__sync_single(io_tlb_addr_t buffer, char *dma_addr, size_t size, int dir)
339{
340#ifndef SWIOTLB_ARCH_HAS_SYNC_SINGLE
341 if (dir == DMA_TO_DEVICE)
342 memcpy(dma_addr, buffer, size);
343 else
344 memcpy(buffer, dma_addr, size);
345#else
346 __swiotlb_arch_sync_single(buffer, dma_addr, size, dir);
347#endif
348}
349
278/* 350/*
279 * Allocates bounce buffer and returns its kernel virtual address. 351 * Allocates bounce buffer and returns its kernel virtual address.
280 */ 352 */
281static void * 353static void *
282map_single(struct device *hwdev, char *buffer, size_t size, int dir) 354map_single(struct device *hwdev, io_tlb_addr_t buffer, size_t size, int dir)
283{ 355{
284 unsigned long flags; 356 unsigned long flags;
285 char *dma_addr; 357 char *dma_addr;
@@ -352,7 +424,7 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
352 */ 424 */
353 io_tlb_orig_addr[index] = buffer; 425 io_tlb_orig_addr[index] = buffer;
354 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) 426 if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
355 memcpy(dma_addr, buffer, size); 427 __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
356 428
357 return dma_addr; 429 return dma_addr;
358} 430}
@@ -366,17 +438,18 @@ unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
366 unsigned long flags; 438 unsigned long flags;
367 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT; 439 int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
368 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; 440 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
369 char *buffer = io_tlb_orig_addr[index]; 441 io_tlb_addr_t buffer = io_tlb_orig_addr[index];
370 442
371 /* 443 /*
372 * First, sync the memory before unmapping the entry 444 * First, sync the memory before unmapping the entry
373 */ 445 */
374 if (buffer && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) 446 if (!swiotlb_orig_addr_null(buffer)
447 && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
375 /* 448 /*
376 * bounce... copy the data back into the original buffer * and 449 * bounce... copy the data back into the original buffer * and
377 * delete the bounce buffer. 450 * delete the bounce buffer.
378 */ 451 */
379 memcpy(buffer, dma_addr, size); 452 __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
380 453
381 /* 454 /*
382 * Return the buffer to the free list by setting the corresponding 455 * Return the buffer to the free list by setting the corresponding
@@ -409,18 +482,18 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
409 int dir, int target) 482 int dir, int target)
410{ 483{
411 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT; 484 int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
412 char *buffer = io_tlb_orig_addr[index]; 485 io_tlb_addr_t buffer = io_tlb_orig_addr[index];
413 486
414 switch (target) { 487 switch (target) {
415 case SYNC_FOR_CPU: 488 case SYNC_FOR_CPU:
416 if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)) 489 if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
417 memcpy(buffer, dma_addr, size); 490 __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
418 else 491 else
419 BUG_ON(dir != DMA_TO_DEVICE); 492 BUG_ON(dir != DMA_TO_DEVICE);
420 break; 493 break;
421 case SYNC_FOR_DEVICE: 494 case SYNC_FOR_DEVICE:
422 if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) 495 if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
423 memcpy(dma_addr, buffer, size); 496 __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
424 else 497 else
425 BUG_ON(dir != DMA_FROM_DEVICE); 498 BUG_ON(dir != DMA_FROM_DEVICE);
426 break; 499 break;
@@ -429,11 +502,13 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
429 } 502 }
430} 503}
431 504
505#ifdef SWIOTLB_ARCH_NEED_ALLOC
506
432void * 507void *
433swiotlb_alloc_coherent(struct device *hwdev, size_t size, 508swiotlb_alloc_coherent(struct device *hwdev, size_t size,
434 dma_addr_t *dma_handle, gfp_t flags) 509 dma_addr_t *dma_handle, gfp_t flags)
435{ 510{
436 unsigned long dev_addr; 511 dma_addr_t dev_addr;
437 void *ret; 512 void *ret;
438 int order = get_order(size); 513 int order = get_order(size);
439 514
@@ -444,8 +519,11 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
444 */ 519 */
445 flags |= GFP_DMA; 520 flags |= GFP_DMA;
446 521
447 ret = (void *)__get_free_pages(flags, order); 522 if (!order_needs_mapping(order))
448 if (ret && address_needs_mapping(hwdev, virt_to_phys(ret))) { 523 ret = (void *)__get_free_pages(flags, order);
524 else
525 ret = NULL;
526 if (ret && address_needs_mapping(hwdev, virt_to_bus(ret))) {
449 /* 527 /*
450 * The allocated memory isn't reachable by the device. 528 * The allocated memory isn't reachable by the device.
451 * Fall back on swiotlb_map_single(). 529 * Fall back on swiotlb_map_single().
@@ -465,22 +543,24 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
465 if (swiotlb_dma_mapping_error(handle)) 543 if (swiotlb_dma_mapping_error(handle))
466 return NULL; 544 return NULL;
467 545
468 ret = phys_to_virt(handle); 546 ret = bus_to_virt(handle);
469 } 547 }
470 548
471 memset(ret, 0, size); 549 memset(ret, 0, size);
472 dev_addr = virt_to_phys(ret); 550 dev_addr = virt_to_bus(ret);
473 551
474 /* Confirm address can be DMA'd by device */ 552 /* Confirm address can be DMA'd by device */
475 if (address_needs_mapping(hwdev, dev_addr)) { 553 if (address_needs_mapping(hwdev, dev_addr)) {
476 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016lx\n", 554 printk("hwdev DMA mask = 0x%016Lx, dev_addr = 0x%016Lx\n",
477 (unsigned long long)*hwdev->dma_mask, dev_addr); 555 (unsigned long long)*hwdev->dma_mask,
556 (unsigned long long)dev_addr);
478 panic("swiotlb_alloc_coherent: allocated memory is out of " 557 panic("swiotlb_alloc_coherent: allocated memory is out of "
479 "range for device"); 558 "range for device");
480 } 559 }
481 *dma_handle = dev_addr; 560 *dma_handle = dev_addr;
482 return ret; 561 return ret;
483} 562}
563EXPORT_SYMBOL(swiotlb_alloc_coherent);
484 564
485void 565void
486swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr, 566swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
@@ -493,6 +573,9 @@ swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
493 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */ 573 /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
494 swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE); 574 swiotlb_unmap_single (hwdev, dma_handle, size, DMA_TO_DEVICE);
495} 575}
576EXPORT_SYMBOL(swiotlb_free_coherent);
577
578#endif
496 579
497static void 580static void
498swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) 581swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
@@ -504,7 +587,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
504 * When the mapping is small enough return a static buffer to limit 587 * When the mapping is small enough return a static buffer to limit
505 * the damage, or panic when the transfer is too big. 588 * the damage, or panic when the transfer is too big.
506 */ 589 */
507 printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at " 590 printk(KERN_ERR "DMA: Out of SW-IOMMU space for %zu bytes at "
508 "device %s\n", size, dev ? dev->bus_id : "?"); 591 "device %s\n", size, dev ? dev->bus_id : "?");
509 592
510 if (size > io_tlb_overflow && do_panic) { 593 if (size > io_tlb_overflow && do_panic) {
@@ -525,7 +608,7 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
525dma_addr_t 608dma_addr_t
526swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) 609swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
527{ 610{
528 unsigned long dev_addr = virt_to_phys(ptr); 611 dma_addr_t dev_addr = virt_to_bus(ptr);
529 void *map; 612 void *map;
530 613
531 BUG_ON(dir == DMA_NONE); 614 BUG_ON(dir == DMA_NONE);
@@ -534,19 +617,20 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
534 * we can safely return the device addr and not worry about bounce 617 * we can safely return the device addr and not worry about bounce
535 * buffering it. 618 * buffering it.
536 */ 619 */
537 if (!address_needs_mapping(hwdev, dev_addr) && !swiotlb_force) 620 if (!range_needs_mapping(ptr, size)
621 && !address_needs_mapping(hwdev, dev_addr))
538 return dev_addr; 622 return dev_addr;
539 623
540 /* 624 /*
541 * Oh well, have to allocate and map a bounce buffer. 625 * Oh well, have to allocate and map a bounce buffer.
542 */ 626 */
543 map = map_single(hwdev, ptr, size, dir); 627 map = map_single(hwdev, ptr_to_io_tlb_addr(ptr), size, dir);
544 if (!map) { 628 if (!map) {
545 swiotlb_full(hwdev, size, dir, 1); 629 swiotlb_full(hwdev, size, dir, 1);
546 map = io_tlb_overflow_buffer; 630 map = io_tlb_overflow_buffer;
547 } 631 }
548 632
549 dev_addr = virt_to_phys(map); 633 dev_addr = virt_to_bus(map);
550 634
551 /* 635 /*
552 * Ensure that the address returned is DMA'ble 636 * Ensure that the address returned is DMA'ble
@@ -558,25 +642,6 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
558} 642}
559 643
560/* 644/*
561 * Since DMA is i-cache coherent, any (complete) pages that were written via
562 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
563 * flush them when they get mapped into an executable vm-area.
564 */
565static void
566mark_clean(void *addr, size_t size)
567{
568 unsigned long pg_addr, end;
569
570 pg_addr = PAGE_ALIGN((unsigned long) addr);
571 end = (unsigned long) addr + size;
572 while (pg_addr + PAGE_SIZE <= end) {
573 struct page *page = virt_to_page(pg_addr);
574 set_bit(PG_arch_1, &page->flags);
575 pg_addr += PAGE_SIZE;
576 }
577}
578
579/*
580 * Unmap a single streaming mode DMA translation. The dma_addr and size must 645 * Unmap a single streaming mode DMA translation. The dma_addr and size must
581 * match what was provided for in a previous swiotlb_map_single call. All 646 * match what was provided for in a previous swiotlb_map_single call. All
582 * other usages are undefined. 647 * other usages are undefined.
@@ -588,13 +653,13 @@ void
588swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, 653swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
589 int dir) 654 int dir)
590{ 655{
591 char *dma_addr = phys_to_virt(dev_addr); 656 char *dma_addr = bus_to_virt(dev_addr);
592 657
593 BUG_ON(dir == DMA_NONE); 658 BUG_ON(dir == DMA_NONE);
594 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) 659 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
595 unmap_single(hwdev, dma_addr, size, dir); 660 unmap_single(hwdev, dma_addr, size, dir);
596 else if (dir == DMA_FROM_DEVICE) 661 else if (dir == DMA_FROM_DEVICE)
597 mark_clean(dma_addr, size); 662 dma_mark_clean(dma_addr, size);
598} 663}
599 664
600/* 665/*
@@ -607,17 +672,17 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
607 * address back to the card, you must first perform a 672 * address back to the card, you must first perform a
608 * swiotlb_dma_sync_for_device, and then the device again owns the buffer 673 * swiotlb_dma_sync_for_device, and then the device again owns the buffer
609 */ 674 */
610static inline void 675static void
611swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, 676swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
612 size_t size, int dir, int target) 677 size_t size, int dir, int target)
613{ 678{
614 char *dma_addr = phys_to_virt(dev_addr); 679 char *dma_addr = bus_to_virt(dev_addr);
615 680
616 BUG_ON(dir == DMA_NONE); 681 BUG_ON(dir == DMA_NONE);
617 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) 682 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
618 sync_single(hwdev, dma_addr, size, dir, target); 683 sync_single(hwdev, dma_addr, size, dir, target);
619 else if (dir == DMA_FROM_DEVICE) 684 else if (dir == DMA_FROM_DEVICE)
620 mark_clean(dma_addr, size); 685 dma_mark_clean(dma_addr, size);
621} 686}
622 687
623void 688void
@@ -637,18 +702,18 @@ swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
637/* 702/*
638 * Same as above, but for a sub-range of the mapping. 703 * Same as above, but for a sub-range of the mapping.
639 */ 704 */
640static inline void 705static void
641swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr, 706swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
642 unsigned long offset, size_t size, 707 unsigned long offset, size_t size,
643 int dir, int target) 708 int dir, int target)
644{ 709{
645 char *dma_addr = phys_to_virt(dev_addr) + offset; 710 char *dma_addr = bus_to_virt(dev_addr) + offset;
646 711
647 BUG_ON(dir == DMA_NONE); 712 BUG_ON(dir == DMA_NONE);
648 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end) 713 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
649 sync_single(hwdev, dma_addr, size, dir, target); 714 sync_single(hwdev, dma_addr, size, dir, target);
650 else if (dir == DMA_FROM_DEVICE) 715 else if (dir == DMA_FROM_DEVICE)
651 mark_clean(dma_addr, size); 716 dma_mark_clean(dma_addr, size);
652} 717}
653 718
654void 719void
@@ -687,18 +752,16 @@ int
687swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems, 752swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
688 int dir) 753 int dir)
689{ 754{
690 void *addr; 755 dma_addr_t dev_addr;
691 unsigned long dev_addr;
692 int i; 756 int i;
693 757
694 BUG_ON(dir == DMA_NONE); 758 BUG_ON(dir == DMA_NONE);
695 759
696 for (i = 0; i < nelems; i++, sg++) { 760 for (i = 0; i < nelems; i++, sg++) {
697 addr = SG_ENT_VIRT_ADDRESS(sg); 761 dev_addr = SG_ENT_PHYS_ADDRESS(sg);
698 dev_addr = virt_to_phys(addr); 762 if (range_needs_mapping(SG_ENT_VIRT_ADDRESS(sg), sg->length)
699 if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) { 763 || address_needs_mapping(hwdev, dev_addr)) {
700 void *map = map_single(hwdev, addr, sg->length, dir); 764 void *map = map_single(hwdev, sg_to_io_tlb_addr(sg), sg->length, dir);
701 sg->dma_address = virt_to_bus(map);
702 if (!map) { 765 if (!map) {
703 /* Don't panic here, we expect map_sg users 766 /* Don't panic here, we expect map_sg users
704 to do proper error handling. */ 767 to do proper error handling. */
@@ -707,6 +770,7 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
707 sg[0].dma_length = 0; 770 sg[0].dma_length = 0;
708 return 0; 771 return 0;
709 } 772 }
773 sg->dma_address = virt_to_bus(map);
710 } else 774 } else
711 sg->dma_address = dev_addr; 775 sg->dma_address = dev_addr;
712 sg->dma_length = sg->length; 776 sg->dma_length = sg->length;
@@ -728,9 +792,10 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
728 792
729 for (i = 0; i < nelems; i++, sg++) 793 for (i = 0; i < nelems; i++, sg++)
730 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) 794 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
731 unmap_single(hwdev, (void *) phys_to_virt(sg->dma_address), sg->dma_length, dir); 795 unmap_single(hwdev, bus_to_virt(sg->dma_address),
796 sg->dma_length, dir);
732 else if (dir == DMA_FROM_DEVICE) 797 else if (dir == DMA_FROM_DEVICE)
733 mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length); 798 dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
734} 799}
735 800
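The unmap path decides per entry whether it was bounced by comparing dma_address against the entry's own bus address; only bounced entries go back through unmap_single(). A toy model of that test, with illustrative field names:

#include <stdint.h>
#include <stdio.h>

struct sg_stub { uint64_t native_bus_addr, dma_address; };

static int was_bounced(const struct sg_stub *sg)
{
	/* unequal addresses mean the data went through the bounce pool */
	return sg->dma_address != sg->native_bus_addr;
}

int main(void)
{
	struct sg_stub direct  = { .native_bus_addr = 0x1000, .dma_address = 0x1000 };
	struct sg_stub bounced = { .native_bus_addr = 0x1000, .dma_address = 0x9000 };

	printf("direct: %d, bounced: %d\n", was_bounced(&direct), was_bounced(&bounced));
	return 0;
}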
736/* 801/*
@@ -740,7 +805,7 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
740 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules 805 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
741 * and usage. 806 * and usage.
742 */ 807 */
743static inline void 808static void
744swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg, 809swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
745 int nelems, int dir, int target) 810 int nelems, int dir, int target)
746{ 811{
@@ -750,8 +815,10 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
750 815
751 for (i = 0; i < nelems; i++, sg++) 816 for (i = 0; i < nelems; i++, sg++)
752 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg)) 817 if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
753 sync_single(hwdev, (void *) sg->dma_address, 818 sync_single(hwdev, bus_to_virt(sg->dma_address),
754 sg->dma_length, dir, target); 819 sg->dma_length, dir, target);
820 else if (dir == DMA_FROM_DEVICE)
821 dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length);
755} 822}
756 823
757void 824void
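With the added dma_mark_clean() branch, the scatter-gather sync now mirrors the single-buffer path: bounced entries are synced through the pool, directly mapped DMA_FROM_DEVICE entries are marked clean. The dispatch, reduced to stubs:

#include <stdio.h>

enum dir { FROM_DEVICE, TO_DEVICE };

static void sync_bounced(void) { printf("sync_single()\n"); }
static void mark_clean(void)   { printf("dma_mark_clean()\n"); }

static void sync_one(int bounced, enum dir d)
{
	if (bounced)
		sync_bounced();
	else if (d == FROM_DEVICE)
		mark_clean();	/* device wrote the page; CPU caches/dirty state updated */
}

int main(void)
{
	sync_one(1, FROM_DEVICE);
	sync_one(0, FROM_DEVICE);
	sync_one(0, TO_DEVICE);	/* nothing to do for a direct to-device buffer */
	return 0;
}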
@@ -768,10 +835,48 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
768 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); 835 swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
769} 836}
770 837
838#ifdef SWIOTLB_ARCH_NEED_MAP_PAGE
839
840dma_addr_t
841swiotlb_map_page(struct device *hwdev, struct page *page,
842 unsigned long offset, size_t size,
843 enum dma_data_direction direction)
844{
845 dma_addr_t dev_addr;
846 char *map;
847
848 dev_addr = page_to_bus(page) + offset;
849 if (address_needs_mapping(hwdev, dev_addr)) {
850 map = map_single(hwdev, page_to_io_tlb_addr(page, offset), size, direction);
851 if (!map) {
852 swiotlb_full(hwdev, size, direction, 1);
853 map = io_tlb_overflow_buffer;
854 }
855 dev_addr = virt_to_bus(map);
856 }
857
858 return dev_addr;
859}
860
861void
862swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
863 size_t size, enum dma_data_direction direction)
864{
865 char *dma_addr = bus_to_virt(dev_addr);
866
867 BUG_ON(direction == DMA_NONE);
868 if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
869 unmap_single(hwdev, dma_addr, size, direction);
870 else if (direction == DMA_FROM_DEVICE)
871 dma_mark_clean(dma_addr, size);
872}
873
874#endif
875
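swiotlb_map_page() above reuses the overflow-buffer convention: when map_single() fails, the returned bus address points at io_tlb_overflow_buffer, which is exactly what swiotlb_dma_mapping_error() below tests for. A self-contained model of that error-signalling handshake (stand-in names and addresses, not the kernel API):

#include <stdint.h>
#include <stdio.h>

static char io_tlb_overflow_buffer[128];

static uint64_t virt_to_bus_stub(void *v) { return (uintptr_t)v; }

static uint64_t map_page_stub(int pool_exhausted)
{
	if (pool_exhausted)	/* fall back to the dedicated overflow buffer */
		return virt_to_bus_stub(io_tlb_overflow_buffer);
	return 0x1000;		/* pretend this is a real bounce-buffer address */
}

static int mapping_error_stub(uint64_t dma_addr)
{
	/* the overflow buffer's bus address doubles as the error cookie */
	return dma_addr == virt_to_bus_stub(io_tlb_overflow_buffer);
}

int main(void)
{
	uint64_t ok = map_page_stub(0), failed = map_page_stub(1);

	printf("ok: %d, failed: %d\n", mapping_error_stub(ok), mapping_error_stub(failed));
	return 0;
}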
771int 876int
772swiotlb_dma_mapping_error(dma_addr_t dma_addr) 877swiotlb_dma_mapping_error(dma_addr_t dma_addr)
773{ 878{
774 return (dma_addr == virt_to_phys(io_tlb_overflow_buffer)); 879 return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
775} 880}
776 881
777/* 882/*
@@ -780,10 +885,13 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
780 * during bus mastering, then you would pass 0x00ffffff as the mask to 885 * during bus mastering, then you would pass 0x00ffffff as the mask to
781 * this function. 886 * this function.
782 */ 887 */
888#ifndef __swiotlb_dma_supported
889#define __swiotlb_dma_supported(hwdev, mask) (virt_to_bus(io_tlb_end - 1) <= (mask))
890#endif
783int 891int
784swiotlb_dma_supported (struct device *hwdev, u64 mask) 892swiotlb_dma_supported(struct device *hwdev, u64 mask)
785{ 893{
786 return (virt_to_phys (io_tlb_end) - 1) <= mask; 894 return __swiotlb_dma_supported(hwdev, mask);
787} 895}
788 896
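The #ifndef wrapper turns the DMA-mask test into an override hook: an architecture can pre-define __swiotlb_dma_supported before this file is built and it is picked up, otherwise the generic bound applies. The same default-with-override macro pattern, demonstrated with a made-up name and mask values:

#include <stdio.h>

/* An arch wanting its own policy would define this before inclusion: */
/* #define my_dma_supported(mask) arch_specific_test(mask) */

#ifndef my_dma_supported
#define my_dma_supported(mask) (0x00ffffffULL <= (mask))	/* generic default */
#endif

int main(void)
{
	printf("24-bit mask ok: %d\n", (int)my_dma_supported(0x00ffffffULL));
	printf("16-bit mask ok: %d\n", (int)my_dma_supported(0x0000ffffULL));
	return 0;
}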
789EXPORT_SYMBOL(swiotlb_init); 897EXPORT_SYMBOL(swiotlb_init);
@@ -798,6 +906,4 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device);
798EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); 906EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
799EXPORT_SYMBOL(swiotlb_sync_sg_for_device); 907EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
800EXPORT_SYMBOL(swiotlb_dma_mapping_error); 908EXPORT_SYMBOL(swiotlb_dma_mapping_error);
801EXPORT_SYMBOL(swiotlb_alloc_coherent);
802EXPORT_SYMBOL(swiotlb_free_coherent);
803EXPORT_SYMBOL(swiotlb_dma_supported); 909EXPORT_SYMBOL(swiotlb_dma_supported);
diff --git a/lib/textsearch.c b/lib/textsearch.c
index 98bcadc01185..9e2a002c5b54 100644
--- a/lib/textsearch.c
+++ b/lib/textsearch.c
@@ -218,7 +218,7 @@ static unsigned int get_linear_data(unsigned int consumed, const u8 **dst,
218 * Call textsearch_next() to retrieve subsequent matches. 218 * Call textsearch_next() to retrieve subsequent matches.
219 * 219 *
220 * Returns the position of first occurrence of the pattern or 220 * Returns the position of first occurrence of the pattern or
221 * UINT_MAX if no occurrence was found. 221 * %UINT_MAX if no occurrence was found.
222 */ 222 */
223unsigned int textsearch_find_continuous(struct ts_config *conf, 223unsigned int textsearch_find_continuous(struct ts_config *conf,
224 struct ts_state *state, 224 struct ts_state *state,
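The markup fix above documents the contract callers rely on: textsearch_find_continuous() reports "no match" as %UINT_MAX, never as a negative value. A userspace stand-in for the caller side (the real API takes ts_config/ts_state arguments, elided here):

#include <limits.h>
#include <stdio.h>
#include <string.h>

static unsigned int find_stub(const char *haystack, const char *needle)
{
	const char *hit = strstr(haystack, needle);
	return hit ? (unsigned int)(hit - haystack) : UINT_MAX;
}

int main(void)
{
	unsigned int pos = find_stub("some text", "tex");

	if (pos == UINT_MAX)	/* the documented "not found" sentinel */
		printf("no match\n");
	else
		printf("match at %u\n", pos);
	return 0;
}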
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index bed7229378f2..b025864d2e43 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -247,12 +247,12 @@ static char * number(char * buf, char * end, unsigned long long num, int base, i
247 * be generated for the given input, excluding the trailing 247 * be generated for the given input, excluding the trailing
248 * '\0', as per ISO C99. If you want to have the exact 248 * '\0', as per ISO C99. If you want to have the exact
249 * number of characters written into @buf as return value 249 * number of characters written into @buf as return value
250 * (not including the trailing '\0'), use vscnprintf. If the 250 * (not including the trailing '\0'), use vscnprintf(). If the
251 * return is greater than or equal to @size, the resulting 251 * return is greater than or equal to @size, the resulting
252 * string is truncated. 252 * string is truncated.
253 * 253 *
254 * Call this function if you are already dealing with a va_list. 254 * Call this function if you are already dealing with a va_list.
255 * You probably want snprintf instead. 255 * You probably want snprintf() instead.
256 */ 256 */
257int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) 257int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
258{ 258{
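The doc change spells out the vsnprintf()/vscnprintf() split: the former returns the length the output *would* have had, the latter the number of characters actually stored. Plain C99 snprintf() exhibits the same would-be-versus-stored distinction:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[8];
	int would_be = snprintf(buf, sizeof(buf), "%s", "hello, world");
	int stored = (int)strlen(buf);	/* what a scnprintf()-style API returns */

	/* would_be=12, stored=7: output was truncated to fit buf */
	printf("would_be=%d stored=%d buf=\"%s\"\n", would_be, stored, buf);
	return 0;
}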
@@ -509,7 +509,7 @@ EXPORT_SYMBOL(vsnprintf);
509 * returns 0. 509 * returns 0.
510 * 510 *
511 * Call this function if you are already dealing with a va_list. 511 * Call this function if you are already dealing with a va_list.
512 * You probably want scnprintf instead. 512 * You probably want scnprintf() instead.
513 */ 513 */
514int vscnprintf(char *buf, size_t size, const char *fmt, va_list args) 514int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
515{ 515{
@@ -554,8 +554,7 @@ EXPORT_SYMBOL(snprintf);
554 * @...: Arguments for the format string 554 * @...: Arguments for the format string
555 * 555 *
556 * The return value is the number of characters written into @buf not including 556 * The return value is the number of characters written into @buf not including
557 * the trailing '\0'. If @size is <= 0 the function returns 0. If the return is 557 * the trailing '\0'. If @size is <= 0 the function returns 0.
558 * greater than or equal to @size, the resulting string is truncated.
559 */ 558 */
560 559
561int scnprintf(char * buf, size_t size, const char *fmt, ...) 560int scnprintf(char * buf, size_t size, const char *fmt, ...)
@@ -577,11 +576,11 @@ EXPORT_SYMBOL(scnprintf);
577 * @args: Arguments for the format string 576 * @args: Arguments for the format string
578 * 577 *
579 * The function returns the number of characters written 578 * The function returns the number of characters written
580 * into @buf. Use vsnprintf or vscnprintf in order to avoid 579 * into @buf. Use vsnprintf() or vscnprintf() in order to avoid
581 * buffer overflows. 580 * buffer overflows.
582 * 581 *
583 * Call this function if you are already dealing with a va_list. 582 * Call this function if you are already dealing with a va_list.
584 * You probably want sprintf instead. 583 * You probably want sprintf() instead.
585 */ 584 */
586int vsprintf(char *buf, const char *fmt, va_list args) 585int vsprintf(char *buf, const char *fmt, va_list args)
587{ 586{
@@ -597,7 +596,7 @@ EXPORT_SYMBOL(vsprintf);
597 * @...: Arguments for the format string 596 * @...: Arguments for the format string
598 * 597 *
599 * The function returns the number of characters written 598 * The function returns the number of characters written
600 * into @buf. Use snprintf or scnprintf in order to avoid 599 * into @buf. Use snprintf() or scnprintf() in order to avoid
601 * buffer overflows. 600 * buffer overflows.
602 */ 601 */
603int sprintf(char * buf, const char *fmt, ...) 602int sprintf(char * buf, const char *fmt, ...)
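As the kernel-doc above stresses, sprintf() writes unbounded output, so attacker- or caller-controlled strings can overrun the destination; the bounded variants cannot. The safe pattern, in a few lines of plain C:

#include <stdio.h>

int main(void)
{
	char buf[8];

	/* sprintf(buf, "%s", user_string);  -- may overrun buf */
	snprintf(buf, sizeof(buf), "%s", "definitely too long");	/* truncates safely */
	printf("%s\n", buf);
	return 0;
}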