| author    | Russell King <rmk+kernel@arm.linux.org.uk> | 2011-01-06 17:32:52 -0500 |
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2011-01-06 17:32:52 -0500 |
| commit    | 4073723acb9cdcdbe4df9c0e0c376c65d1697e43 (patch) |
| tree      | f41c17eac157b1223ce104845cf9b1e5a9e6a83d /arch/arm/common |
| parent    | 58daf18cdcab550262a5f4681e1f1e073e21965a (diff) |
| parent    | 4ec3eb13634529c0bc7466658d84d0bbe3244aea (diff) |
Merge branch 'misc' into devel
Conflicts:
arch/arm/Kconfig
arch/arm/common/Makefile
arch/arm/kernel/Makefile
arch/arm/kernel/smp.c
Diffstat (limited to 'arch/arm/common')
| -rw-r--r-- | arch/arm/common/Kconfig     |   4 |
| -rw-r--r-- | arch/arm/common/clkdev.c    | 179 |
| -rw-r--r-- | arch/arm/common/dmabounce.c |  16 |
3 files changed, 8 insertions, 191 deletions
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 0a34c8186924..778655f0257a 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -37,7 +37,3 @@ config SHARP_PARAM
 
 config SHARP_SCOOP
         bool
-
-config COMMON_CLKDEV
-        bool
-        select HAVE_CLK
diff --git a/arch/arm/common/clkdev.c b/arch/arm/common/clkdev.c
deleted file mode 100644
index e2b2bb66e094..000000000000
--- a/arch/arm/common/clkdev.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * arch/arm/common/clkdev.c
- *
- * Copyright (C) 2008 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Helper for the clk API to assist looking up a struct clk.
- */
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/errno.h>
-#include <linux/err.h>
-#include <linux/string.h>
-#include <linux/mutex.h>
-#include <linux/clk.h>
-#include <linux/slab.h>
-
-#include <asm/clkdev.h>
-#include <mach/clkdev.h>
-
-static LIST_HEAD(clocks);
-static DEFINE_MUTEX(clocks_mutex);
-
-/*
- * Find the correct struct clk for the device and connection ID.
- * We do slightly fuzzy matching here:
- * An entry with a NULL ID is assumed to be a wildcard.
- * If an entry has a device ID, it must match
- * If an entry has a connection ID, it must match
- * Then we take the most specific entry - with the following
- * order of precedence: dev+con > dev only > con only.
- */
-static struct clk *clk_find(const char *dev_id, const char *con_id)
-{
-        struct clk_lookup *p;
-        struct clk *clk = NULL;
-        int match, best = 0;
-
-        list_for_each_entry(p, &clocks, node) {
-                match = 0;
-                if (p->dev_id) {
-                        if (!dev_id || strcmp(p->dev_id, dev_id))
-                                continue;
-                        match += 2;
-                }
-                if (p->con_id) {
-                        if (!con_id || strcmp(p->con_id, con_id))
-                                continue;
-                        match += 1;
-                }
-
-                if (match > best) {
-                        clk = p->clk;
-                        if (match != 3)
-                                best = match;
-                        else
-                                break;
-                }
-        }
-        return clk;
-}
-
-struct clk *clk_get_sys(const char *dev_id, const char *con_id)
-{
-        struct clk *clk;
-
-        mutex_lock(&clocks_mutex);
-        clk = clk_find(dev_id, con_id);
-        if (clk && !__clk_get(clk))
-                clk = NULL;
-        mutex_unlock(&clocks_mutex);
-
-        return clk ? clk : ERR_PTR(-ENOENT);
-}
-EXPORT_SYMBOL(clk_get_sys);
-
-struct clk *clk_get(struct device *dev, const char *con_id)
-{
-        const char *dev_id = dev ? dev_name(dev) : NULL;
-
-        return clk_get_sys(dev_id, con_id);
-}
-EXPORT_SYMBOL(clk_get);
-
-void clk_put(struct clk *clk)
-{
-        __clk_put(clk);
-}
-EXPORT_SYMBOL(clk_put);
-
-void clkdev_add(struct clk_lookup *cl)
-{
-        mutex_lock(&clocks_mutex);
-        list_add_tail(&cl->node, &clocks);
-        mutex_unlock(&clocks_mutex);
-}
-EXPORT_SYMBOL(clkdev_add);
-
-void __init clkdev_add_table(struct clk_lookup *cl, size_t num)
-{
-        mutex_lock(&clocks_mutex);
-        while (num--) {
-                list_add_tail(&cl->node, &clocks);
-                cl++;
-        }
-        mutex_unlock(&clocks_mutex);
-}
-
-#define MAX_DEV_ID      20
-#define MAX_CON_ID      16
-
-struct clk_lookup_alloc {
-        struct clk_lookup cl;
-        char    dev_id[MAX_DEV_ID];
-        char    con_id[MAX_CON_ID];
-};
-
-struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
-        const char *dev_fmt, ...)
-{
-        struct clk_lookup_alloc *cla;
-
-        cla = kzalloc(sizeof(*cla), GFP_KERNEL);
-        if (!cla)
-                return NULL;
-
-        cla->cl.clk = clk;
-        if (con_id) {
-                strlcpy(cla->con_id, con_id, sizeof(cla->con_id));
-                cla->cl.con_id = cla->con_id;
-        }
-
-        if (dev_fmt) {
-                va_list ap;
-
-                va_start(ap, dev_fmt);
-                vscnprintf(cla->dev_id, sizeof(cla->dev_id), dev_fmt, ap);
-                cla->cl.dev_id = cla->dev_id;
-                va_end(ap);
-        }
-
-        return &cla->cl;
-}
-EXPORT_SYMBOL(clkdev_alloc);
-
-int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
-        struct device *dev)
-{
-        struct clk *r = clk_get(dev, id);
-        struct clk_lookup *l;
-
-        if (IS_ERR(r))
-                return PTR_ERR(r);
-
-        l = clkdev_alloc(r, alias, alias_dev_name);
-        clk_put(r);
-        if (!l)
-                return -ENODEV;
-        clkdev_add(l);
-        return 0;
-}
-EXPORT_SYMBOL(clk_add_alias);
-
-/*
- * clkdev_drop - remove a clock dynamically allocated
- */
-void clkdev_drop(struct clk_lookup *cl)
-{
-        mutex_lock(&clocks_mutex);
-        list_del(&cl->node);
-        mutex_unlock(&clocks_mutex);
-        kfree(cl);
-}
-EXPORT_SYMBOL(clkdev_drop);
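
The file removed above is the ARM clkdev helper: board code registers `struct clk_lookup` entries, and `clk_get()`/`clk_get_sys()` resolve them via `clk_find()`, where a NULL `dev_id` or `con_id` acts as a wildcard and a dev+con match beats dev-only, which beats con-only. A minimal sketch of how a board file would have fed this table is shown below; the device and clock names are invented for illustration, and `struct clk` itself is assumed to be provided by the platform's own clock code (it was machine-specific at this point in the tree).

```c
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clkdev.h>	/* struct clk_lookup, clkdev_add_table() at this point in the tree */

/* Assumed to be defined by the platform's own clock implementation. */
extern struct clk uart0_clk;
extern struct clk apb_clk;

/* Hypothetical lookup table; the IDs below are examples, not from this commit. */
static struct clk_lookup board_clk_lookups[] = {
	{ .dev_id = "dev:uart0", .con_id = "uart", .clk = &uart0_clk },	/* match = 3 */
	{ .dev_id = "dev:uart0", .con_id = NULL,   .clk = &apb_clk },	/* match = 2 */
	{ .dev_id = NULL,        .con_id = "apb",  .clk = &apb_clk },	/* match = 1 */
};

static void __init board_clocks_init(void)
{
	/* Appends every entry to the global 'clocks' list under clocks_mutex. */
	clkdev_add_table(board_clk_lookups, ARRAY_SIZE(board_clk_lookups));
}
```

With that table registered, `clk_get()` on a device named "dev:uart0" with connection ID "uart" returns `&uart0_clk` (match == 3, so the search stops early), while `clk_get_sys(NULL, "apb")` falls back to the con_id-only wildcard entry.
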
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index cc0a932bbea9..e5681636626f 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -328,7 +328,7 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
+dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
                 enum dma_data_direction dir)
 {
         dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -338,7 +338,7 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
 
         return map_single(dev, ptr, size, dir);
 }
-EXPORT_SYMBOL(dma_map_single);
+EXPORT_SYMBOL(__dma_map_single);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -346,7 +346,7 @@ EXPORT_SYMBOL(dma_map_single);
  * the safe buffer. (basically return things back to the way they
  * should be)
  */
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction dir)
 {
         dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -354,9 +354,9 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 
         unmap_single(dev, dma_addr, size, dir);
 }
-EXPORT_SYMBOL(dma_unmap_single);
+EXPORT_SYMBOL(__dma_unmap_single);
 
-dma_addr_t dma_map_page(struct device *dev, struct page *page,
+dma_addr_t __dma_map_page(struct device *dev, struct page *page,
                 unsigned long offset, size_t size, enum dma_data_direction dir)
 {
         dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
@@ -372,7 +372,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
 
         return map_single(dev, page_address(page) + offset, size, dir);
 }
-EXPORT_SYMBOL(dma_map_page);
+EXPORT_SYMBOL(__dma_map_page);
 
 /*
  * see if a mapped address was really a "safe" buffer and if so, copy
@@ -380,7 +380,7 @@ EXPORT_SYMBOL(dma_map_page);
  * the safe buffer. (basically return things back to the way they
  * should be)
  */
-void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction dir)
 {
         dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
@@ -388,7 +388,7 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 
         unmap_single(dev, dma_addr, size, dir);
 }
-EXPORT_SYMBOL(dma_unmap_page);
+EXPORT_SYMBOL(__dma_unmap_page);
 
 int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
                 unsigned long off, size_t sz, enum dma_data_direction dir)
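
The dmabounce hunks above are a pure rename: the map/unmap entry points gain a leading `__`, which fits dmabounce being demoted to a backend that the ARM dma-mapping layer reaches through the unprefixed `dma_map_single()`/`dma_map_page()` wrappers. Driver-facing usage is unchanged; the hypothetical snippet below (function and variable names are invented) still goes through the generic DMA API and only picks up the bounce-buffer path underneath when the device needs it.

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/*
 * Hypothetical caller: unaffected by the dma_map_single() ->
 * __dma_map_single() rename, since it only uses the generic DMA API.
 */
static int example_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... hand 'handle' to the hardware and wait for completion ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
```

The unprefixed wrappers are expected to dispatch either to the plain cache-maintenance path or to these renamed dmabounce routines, depending on whether the device needs bounce buffers.
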
