author     Lajos Molnar <molnar@ti.com>                2010-07-16 03:06:55 -0400
committer  Paolo Pisati <paolo.pisati@canonical.com>   2012-08-17 04:19:01 -0400
commit     1a32ef8fb92d765f79f3d2597ab76de60f25061a (patch)
tree       7f31b1382ad9b6ce7708dd289c35c779cff15cdb
parent     8208e97c462a444d96d4c7434ce793f135f8ec49 (diff)
TILER: Cleaned up tmm-pat.c
Removed unnecessary initializations.
Simplified page allocation logic by directly allocating pages
instead of going through the free_page list.
Made page and memory allocation global, so that the page cache
can be shared between PATs (sketched below).
Deinitialization logic now works with multiple PATs.
Signed-off-by: Lajos Molnar <molnar@ti.com>
Signed-off-by: David Sin <davidsin@ti.com>
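A minimal sketch of the page-cache sharing described above, simplified from the tmm-pat.c hunk in this patch. Note that put_page_cached is a hypothetical helper name for illustration; the patch inlines this logic in free_fast(), and locking is omitted here:

        /* global page cache, shared by every PAT instance */
        static LIST_HEAD(free_list);    /* cached free pages */
        static u32 total_mem;           /* total memory allocated (free & used) */

        /* on free: keep the page cached while under the limit, else release it */
        static void put_page_cached(struct mem *m)
        {
                if (total_mem < cache_limit) {
                        list_add(&m->list, &free_list);
                } else {
                        total_mem -= PAGE_SIZE;
                        __free_page(m->pg);
                }
        }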
TILER: Simplified dmm.c
Removed unnecessary type casting.
Updated logic to use a common bitfield-setting macro (see the example below).
Removed unnecessary step-by-step register modifications.
Signed-off-by: Lajos Molnar <molnar@ti.com>
Signed-off-by: David Sin <davidsin@ti.com>
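For reference, the bitfield helpers this change introduces (MASK and SET_FLD, defined in the dmm.c hunk below) behave as follows; the sample values are illustrative only:

        /* MASK(msb, lsb) builds a mask covering bits msb..lsb */
        MASK(23, 16)                      == 0x00FF0000
        /* SET_FLD(reg, msb, lsb, val) replaces that field within reg */
        SET_FLD(0x12345678, 23, 16, 0xAB) == 0x12AB5678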
TILER: Cleaned up tcm API definitions.
Fixed comments.
Removed unused methods.
Removed the AREA_FMT macro, which caused a checkpatch failure.
Added further error checking to tcm methods (usage sketched below).
Signed-off-by: Lajos Molnar <molnar@ti.com>
Signed-off-by: David Sin <davidsin@ti.com>
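The added checks in tcm_reserve_2d() (tcm.h hunk below) reject a NULL area, a zero width or height, and a non-power-of-2 alignment with -EINVAL before calling into the allocator. A hypothetical caller sketch, assuming a container tcm already exists:

        struct tcm_area area;
        s32 ret = tcm_reserve_2d(tcm, 16, 8, 2, &area); /* 16x8 slots, align 2 */
        if (ret)
                return ret;     /* -ENODEV, -EINVAL or -ENOMEM */
        /* ... use the reserved area ... */
        tcm_free(&area);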
-rw-r--r--  arch/arm/mach-omap2/include/mach/dmm.h      2
-rw-r--r--  drivers/media/video/tiler/dmm.c           211
-rw-r--r--  drivers/media/video/tiler/tcm.h            64
-rw-r--r--  drivers/media/video/tiler/tcm/tcm-utils.h  19
-rw-r--r--  drivers/media/video/tiler/tmm-pat.c       305
-rw-r--r--  drivers/media/video/tiler/tmm.h             6
6 files changed, 220 insertions, 387 deletions
diff --git a/arch/arm/mach-omap2/include/mach/dmm.h b/arch/arm/mach-omap2/include/mach/dmm.h
index 700f08aefbc..77f824d2a8c 100644
--- a/arch/arm/mach-omap2/include/mach/dmm.h
+++ b/arch/arm/mach-omap2/include/mach/dmm.h
@@ -3,6 +3,8 @@
  *
  * DMM driver support functions for TI OMAP processors.
  *
+ * Author: David Sin <davidsin@ti.com>
+ *
  * Copyright (C) 2009-2010 Texas Instruments, Inc.
  *
  * This package is free software; you can redistribute it and/or modify
diff --git a/drivers/media/video/tiler/dmm.c b/drivers/media/video/tiler/dmm.c
index 8663b36a5e7..685a1935a7f 100644
--- a/drivers/media/video/tiler/dmm.c
+++ b/drivers/media/video/tiler/dmm.c
@@ -3,6 +3,9 @@
  *
  * DMM driver support functions for TI OMAP processors.
  *
+ * Authors: David Sin <davidsin@ti.com>
+ *          Lajos Molnar <molnar@ti.com>
+ *
  * Copyright (C) 2009-2010 Texas Instruments, Inc.
  *
  * This package is free software; you can redistribute it and/or modify
@@ -16,10 +19,6 @@

 #include <linux/init.h>
 #include <linux/module.h>
-#include <linux/cdev.h> /* struct cdev */
-#include <linux/kdev_t.h> /* MKDEV() */
-#include <linux/fs.h> /* register_chrdev_region() */
-#include <linux/device.h> /* struct class */
 #include <linux/platform_device.h> /* platform_device() */
 #include <linux/err.h> /* IS_ERR() */
 #include <linux/io.h> /* ioremap() */
@@ -29,10 +28,10 @@
 #include <mach/dmm.h>

 #undef __DEBUG__
-#define BITS_32(in_NbBits) ((((u32)1 << in_NbBits) - 1) | ((u32)1 << in_NbBits))
-#define BITFIELD_32(in_UpBit, in_LowBit)\
-        (BITS_32(in_UpBit) & ~((BITS_32(in_LowBit)) >> 1))
-#define BF BITFIELD_32
+
+#define MASK(msb, lsb) (((1 << ((msb) + 1 - (lsb))) - 1) << (lsb))
+#define SET_FLD(reg, msb, lsb, val) \
+        (((reg) & ~MASK((msb), (lsb))) | (((val) << (lsb)) & MASK((msb), (lsb))))

 #ifdef __DEBUG__
 #define DEBUG(x, y) printk(KERN_NOTICE "%s()::%d:%s=(0x%08x)\n", \
@@ -41,16 +40,6 @@
 #define DEBUG(x, y)
 #endif

-static s32 dmm_major;
-static s32 dmm_minor;
-
-struct dmm_dev {
-        struct cdev cdev;
-};
-
-static struct dmm_dev *dmm_device;
-static struct class *dmmdev_class;
-
 static struct platform_driver dmm_driver_ldm = {
         .driver = {
                 .owner = THIS_MODULE,
@@ -63,18 +52,15 @@ static struct platform_driver dmm_driver_ldm = {

 s32 dmm_pat_refill(struct dmm *dmm, struct pat *pd, enum pat_mode mode)
 {
-        void __iomem *r = NULL;
-        u32 v = -1, w = -1;
+        void __iomem *r;
+        u32 v;

         /* Only manual refill supported */
         if (mode != MANUAL)
                 return -EFAULT;

-        /*
-         * Check that the DMM_PAT_STATUS register
-         * has not reported an error.
-         */
-        r = (void __iomem *)((u32)dmm->base | DMM_PAT_STATUS__0);
+        /* Check that the DMM_PAT_STATUS register has not reported an error */
+        r = dmm->base + DMM_PAT_STATUS__0;
         v = __raw_readl(r);
         if ((v & 0xFC00) != 0) {
                 while (1)
@@ -82,28 +68,19 @@ s32 dmm_pat_refill(struct dmm *dmm, struct pat *pd, enum pat_mode mode)
         }

         /* Set "next" register to NULL */
-        r = (void __iomem *)((u32)dmm->base | DMM_PAT_DESCR__0);
+        r = dmm->base + DMM_PAT_DESCR__0;
         v = __raw_readl(r);
-        w = (v & (~(BF(31, 4)))) | ((((u32)NULL) << 4) & BF(31, 4));
-        __raw_writel(w, r);
+        v = SET_FLD(v, 31, 4, (u32) NULL);
+        __raw_writel(v, r);

         /* Set area to be refilled */
-        r = (void __iomem *)((u32)dmm->base | DMM_PAT_AREA__0);
-        v = __raw_readl(r);
-        w = (v & (~(BF(30, 24)))) | ((((s8)pd->area.y1) << 24) & BF(30, 24));
-        __raw_writel(w, r);
-
-        v = __raw_readl(r);
-        w = (v & (~(BF(23, 16)))) | ((((s8)pd->area.x1) << 16) & BF(23, 16));
-        __raw_writel(w, r);
-
-        v = __raw_readl(r);
-        w = (v & (~(BF(14, 8)))) | ((((s8)pd->area.y0) << 8) & BF(14, 8));
-        __raw_writel(w, r);
-
+        r = dmm->base + DMM_PAT_AREA__0;
         v = __raw_readl(r);
-        w = (v & (~(BF(7, 0)))) | ((((s8)pd->area.x0) << 0) & BF(7, 0));
-        __raw_writel(w, r);
+        v = SET_FLD(v, 30, 24, pd->area.y1);
+        v = SET_FLD(v, 23, 16, pd->area.x1);
+        v = SET_FLD(v, 14, 8, pd->area.y0);
+        v = SET_FLD(v, 7, 0, pd->area.x0);
+        __raw_writel(v, r);
         wmb();

 #ifdef __DEBUG__
@@ -115,92 +92,71 @@ s32 dmm_pat_refill(struct dmm *dmm, struct pat *pd, enum pat_mode mode)
 #endif

         /* First, clear the DMM_PAT_IRQSTATUS register */
-        r = (void __iomem *)((u32)dmm->base | (u32)DMM_PAT_IRQSTATUS);
+        r = dmm->base + DMM_PAT_IRQSTATUS;
         __raw_writel(0xFFFFFFFF, r);
         wmb();

-        r = (void __iomem *)((u32)dmm->base | (u32)DMM_PAT_IRQSTATUS_RAW);
-        v = 0xFFFFFFFF;
-
-        while (v != 0x0) {
+        r = dmm->base + DMM_PAT_IRQSTATUS_RAW;
+        do {
                 v = __raw_readl(r);
                 DEBUG("DMM_PAT_IRQSTATUS_RAW", v);
-        }
+        } while (v != 0x0);

         /* Fill data register */
-        r = (void __iomem *)((u32)dmm->base | DMM_PAT_DATA__0);
+        r = dmm->base + DMM_PAT_DATA__0;
         v = __raw_readl(r);

-        /* Apply 4 bit left shft to counter the 4 bit right shift */
-        w = (v & (~(BF(31, 4)))) | ((((u32)(pd->data >> 4)) << 4) & BF(31, 4));
-        __raw_writel(w, r);
+        /* pd->data must be 16 aligned */
+        BUG_ON(pd->data & 15);
+        v = SET_FLD(v, 31, 4, pd->data >> 4);
+        __raw_writel(v, r);
         wmb();

         /* Read back PAT_DATA__0 to see if write was successful */
-        v = 0x0;
-        while (v != pd->data) {
+        do {
                 v = __raw_readl(r);
                 DEBUG("DMM_PAT_DATA__0", v);
-        }
-
-        r = (void __iomem *)((u32)dmm->base | (u32)DMM_PAT_CTRL__0);
-        v = __raw_readl(r);
-
-        w = (v & (~(BF(31, 28)))) | ((((u32)pd->ctrl.ini) << 28) & BF(31, 28));
-        __raw_writel(w, r);
-
-        v = __raw_readl(r);
-        w = (v & (~(BF(16, 16)))) | ((((u32)pd->ctrl.sync) << 16) & BF(16, 16));
-        __raw_writel(w, r);
-
-        v = __raw_readl(r);
-        w = (v & (~(BF(9, 8)))) | ((((u32)pd->ctrl.lut_id) << 8) & BF(9, 8));
-        __raw_writel(w, r);
-
-        v = __raw_readl(r);
-        w = (v & (~(BF(6, 4)))) | ((((u32)pd->ctrl.dir) << 4) & BF(6, 4));
-        __raw_writel(w, r);
+        } while (v != pd->data);

+        r = dmm->base + DMM_PAT_CTRL__0;
         v = __raw_readl(r);
-        w = (v & (~(BF(0, 0)))) | ((((u32)pd->ctrl.start) << 0) & BF(0, 0));
-        __raw_writel(w, r);
+        v = SET_FLD(v, 31, 28, pd->ctrl.ini);
+        v = SET_FLD(v, 16, 16, pd->ctrl.sync);
+        v = SET_FLD(v, 9, 8, pd->ctrl.lut_id);
+        v = SET_FLD(v, 6, 4, pd->ctrl.dir);
+        v = SET_FLD(v, 0, 0, pd->ctrl.start);
+        __raw_writel(v, r);
         wmb();

-        /*
-         * Now, check if PAT_IRQSTATUS_RAW has been
-         * set after the PAT has been refilled
-         */
-        r = (void __iomem *)((u32)dmm->base | (u32)DMM_PAT_IRQSTATUS_RAW);
-        v = 0x0;
-        while ((v & 0x3) != 0x3) {
+        /* Check if PAT_IRQSTATUS_RAW is set after the PAT has been refilled */
+        r = dmm->base + DMM_PAT_IRQSTATUS_RAW;
+        do {
                 v = __raw_readl(r);
                 DEBUG("DMM_PAT_IRQSTATUS_RAW", v);
-        }
+        } while ((v & 0x3) != 0x3);

         /* Again, clear the DMM_PAT_IRQSTATUS register */
-        r = (void __iomem *)((u32)dmm->base | (u32)DMM_PAT_IRQSTATUS);
+        r = dmm->base + DMM_PAT_IRQSTATUS;
         __raw_writel(0xFFFFFFFF, r);
         wmb();

-        r = (void __iomem *)((u32)dmm->base | (u32)DMM_PAT_IRQSTATUS_RAW);
-        v = 0xFFFFFFFF;
-
-        while (v != 0x0) {
+        r = dmm->base + DMM_PAT_IRQSTATUS_RAW;
+        do {
                 v = __raw_readl(r);
                 DEBUG("DMM_PAT_IRQSTATUS_RAW", v);
-        }
+        } while (v != 0x0);

         /* Again, set "next" register to NULL to clear any PAT STATUS errors */
-        r = (void __iomem *)((u32)dmm->base | DMM_PAT_DESCR__0);
+        r = dmm->base + DMM_PAT_DESCR__0;
         v = __raw_readl(r);
-        w = (v & (~(BF(31, 4)))) | ((((u32)NULL) << 4) & BF(31, 4));
-        __raw_writel(w, r);
+        v = SET_FLD(v, 31, 4, (u32) NULL);
+        __raw_writel(v, r);

         /*
          * Now, check that the DMM_PAT_STATUS register
          * has not reported an error before exiting.
          */
-        r = (void __iomem *)((u32)dmm->base | DMM_PAT_STATUS__0);
+        r = dmm->base + DMM_PAT_STATUS__0;
         v = __raw_readl(r);
         if ((v & 0xFC00) != 0) {
                 while (1)
@@ -211,25 +167,10 @@ s32 dmm_pat_refill(struct dmm *dmm, struct pat *pd, enum pat_mode mode)
 }
 EXPORT_SYMBOL(dmm_pat_refill);

-static s32 dmm_open(struct inode *ip, struct file *filp)
-{
-        return 0;
-}
-
-static s32 dmm_release(struct inode *ip, struct file *filp)
-{
-        return 0;
-}
-
-static const struct file_operations dmm_fops = {
-        .open = dmm_open,
-        .release = dmm_release,
-};
-
 struct dmm *dmm_pat_init(u32 id)
 {
-        u32 base = 0;
-        struct dmm *dmm = NULL;
+        u32 base;
+        struct dmm *dmm;
         switch (id) {
         case 0:
                 /* only support id 0 for now */
@@ -276,60 +217,16 @@ EXPORT_SYMBOL(dmm_pat_release);

 static s32 __init dmm_init(void)
 {
-        dev_t dev = 0;
-        s32 r = -1;
-        struct device *device = NULL;
-
-        if (dmm_major) {
-                dev = MKDEV(dmm_major, dmm_minor);
-                r = register_chrdev_region(dev, 1, "dmm");
-        } else {
-                r = alloc_chrdev_region(&dev, dmm_minor, 1, "dmm");
-                dmm_major = MAJOR(dev);
-        }
-
-        dmm_device = kmalloc(sizeof(*dmm_device), GFP_KERNEL);
-        if (!dmm_device) {
-                unregister_chrdev_region(dev, 1);
-                return -ENOMEM;
-        }
-        memset(dmm_device, 0x0, sizeof(struct dmm_dev));
-
-        cdev_init(&dmm_device->cdev, &dmm_fops);
-        dmm_device->cdev.owner = THIS_MODULE;
-        dmm_device->cdev.ops = &dmm_fops;
-
-        r = cdev_add(&dmm_device->cdev, dev, 1);
-        if (r)
-                printk(KERN_ERR "cdev_add():failed\n");
-
-        dmmdev_class = class_create(THIS_MODULE, "dmm");
-
-        if (IS_ERR(dmmdev_class)) {
-                printk(KERN_ERR "class_create():failed\n");
-                goto EXIT;
-        }
-
-        device = device_create(dmmdev_class, NULL, dev, NULL, "dmm");
-        if (device == NULL)
-                printk(KERN_ERR "device_create() fail\n");
-
-        r = platform_driver_register(&dmm_driver_ldm);
-
-EXIT:
-        return r;
+        return platform_driver_register(&dmm_driver_ldm);
 }

 static void __exit dmm_exit(void)
 {
         platform_driver_unregister(&dmm_driver_ldm);
-        cdev_del(&dmm_device->cdev);
-        kfree(dmm_device);
-        device_destroy(dmmdev_class, MKDEV(dmm_major, dmm_minor));
-        class_destroy(dmmdev_class);
 }

 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("davidsin@ti.com");
+MODULE_AUTHOR("molnar@ti.com");
 module_init(dmm_init);
 module_exit(dmm_exit);
diff --git a/drivers/media/video/tiler/tcm.h b/drivers/media/video/tiler/tcm.h
index ccd3ce4eaaa..abeb99b6697 100644
--- a/drivers/media/video/tiler/tcm.h
+++ b/drivers/media/video/tiler/tcm.h
@@ -4,6 +4,8 @@
  * TILER container manager specification and support functions for TI
  * processors.
  *
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
  * Copyright (C) 2009-2010 Texas Instruments, Inc.
  *
  * This package is free software; you can redistribute it and/or modify
@@ -15,19 +17,18 @@
  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  */

-#ifndef _TCM_H_
-#define _TCM_H_
-
-#include <linux/init.h>
-#include <linux/module.h>
+#ifndef TCM_H
+#define TCM_H

 struct tcm;

+/* point */
 struct tcm_pt {
         u16 x;
         u16 y;
 };

+/* 1d or 2d area */
 struct tcm_area {
         bool is2d;              /* whether are is 1d or 2d */
         struct tcm *tcm;        /* parent */
@@ -90,8 +91,6 @@ struct tcm *name(u16 width, u16 height, typeof(attr_t) *attr);
 /**
  * Deinitialize tiler container manager.
  *
- * @author Ravi Ramachandra (3/1/2010)
- *
  * @param tcm Pointer to container manager.
  *
  * @return 0 on success, non-0 error value on error. The call
@@ -108,8 +107,6 @@ static inline void tcm_deinit(struct tcm *tcm)
 /**
  * Reserves a 2D area in the container.
  *
- * @author Ravi Ramachandra (3/1/2010)
- *
  * @param tcm Pointer to container manager.
  * @param height Height(in pages) of area to be reserved.
  * @param width Width(in pages) of area to be reserved.
@@ -129,13 +126,17 @@ static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
                                  u16 align, struct tcm_area *area)
 {
         /* perform rudimentary error checking */
-        s32 res = (tcm == NULL ? -ENODEV :
-                   area == NULL ? -EINVAL :
-                   (height > tcm->height || width > tcm->width) ? -ENOMEM :
-                   tcm->reserve_2d(tcm, height, width, align, area));
-
-        if (area)
+        s32 res = tcm == NULL ? -ENODEV :
+                (area == NULL || width == 0 || height == 0 ||
+                 /* align must be a 2 power */
+                 align & (align - 1)) ? -EINVAL :
+                (height > tcm->height || width > tcm->width) ? -ENOMEM : 0;
+
+        if (!res) {
+                area->is2d = true;
+                res = tcm->reserve_2d(tcm, height, width, align, area);
                 area->tcm = res ? NULL : tcm;
+        }

         return res;
 }
@@ -143,8 +144,6 @@ static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
 /**
  * Reserves a 1D area in the container.
  *
- * @author Ravi Ramachandra (3/1/2010)
- *
  * @param tcm Pointer to container manager.
  * @param slots Number of (contiguous) slots to reserve.
  * @param area Pointer to where the reserved area should be stored.
@@ -159,13 +158,15 @@ static inline s32 tcm_reserve_1d(struct tcm *tcm, u32 slots,
                                  struct tcm_area *area)
 {
         /* perform rudimentary error checking */
-        s32 res = (tcm == NULL ? -ENODEV :
-                   area == NULL ? -EINVAL :
-                   slots > (tcm->width * (u32) tcm->height) ? -ENOMEM :
-                   tcm->reserve_1d(tcm, slots, area));
+        s32 res = tcm == NULL ? -ENODEV :
+                (area == NULL || slots == 0) ? -EINVAL :
+                slots > (tcm->width * (u32) tcm->height) ? -ENOMEM : 0;

-        if (area)
+        if (!res) {
+                area->is2d = false;
+                res = tcm->reserve_1d(tcm, slots, area);
                 area->tcm = res ? NULL : tcm;
+        }

         return res;
 }
@@ -173,8 +174,6 @@ static inline s32 tcm_reserve_1d(struct tcm *tcm, u32 slots,
 /**
  * Free a previously reserved area from the container.
  *
- * @author Ravi Ramachandra (3/1/2010)
- *
  * @param area Pointer to area reserved by a prior call to
  *             tcm_reserve_1d or tcm_reserve_2d call, whether
  *             it was successful or not. (Note: all fields of
@@ -209,8 +208,6 @@ static inline s32 tcm_free(struct tcm_area *area)
  * fit in a 2D slice, its tcm pointer is set to NULL to mark that it is no
  * longer a valid area.
  *
- * @author Lajos Molnar (3/17/2010)
- *
  * @param parent Pointer to a VALID parent area that will get modified
  * @param slice Pointer to the slice area that will get modified
  */
@@ -234,16 +231,10 @@ static inline void tcm_slice(struct tcm_area *parent, struct tcm_area *slice)
         }
 }

-/**
- * Verifies if a tcm area is logically valid.
- *
- * @param area Pointer to tcm area
- *
- * @return TRUE if area is logically valid, FALSE otherwise.
- */
+/* Verify if a tcm area is logically valid */
 static inline bool tcm_area_is_valid(struct tcm_area *area)
 {
-        return (area && area->tcm &&
+        return area && area->tcm &&
                 /* coordinate bounds */
                 area->p1.x < area->tcm->width &&
                 area->p1.y < area->tcm->height &&
@@ -255,8 +246,7 @@ static inline bool tcm_area_is_valid(struct tcm_area *area)
                  area->p1.x + area->p1.y * area->tcm->width) ||
                 /* 2D coordinate relationship */
                 (area->is2d &&
-                 area->p0.x <= area->p1.x))
-        );
+                 area->p0.x <= area->p1.x));
 }

 /* see if a coordinate is within an area */
@@ -316,4 +306,4 @@ static inline u16 __tcm_sizeof(struct tcm_area *area)
              tcm_slice(&safe, &var); \
              var.tcm; tcm_slice(&safe, &var))

-#endif /* _TCM_H_ */
+#endif
diff --git a/drivers/media/video/tiler/tcm/tcm-utils.h b/drivers/media/video/tiler/tcm/tcm-utils.h
index 1013929a455..0d1260af197 100644
--- a/drivers/media/video/tiler/tcm/tcm-utils.h
+++ b/drivers/media/video/tiler/tcm/tcm-utils.h
@@ -3,6 +3,8 @@
  *
  * Utility functions for implementing TILER container managers.
  *
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
  * Copyright (C) 2009-2010 Texas Instruments, Inc.
  *
  * This package is free software; you can redistribute it and/or modify
@@ -14,19 +16,17 @@
  * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
  */

-#ifndef _TCM_UTILS_H
-#define _TCM_UTILS_H
+#ifndef TCM_UTILS_H
+#define TCM_UTILS_H

 #include "../tcm.h"

-#define AREA_FMT "(%03d %03d)-(%03d %03d)"
-#define AREA(area) (area).p0.x, (area).p0.y, (area).p1.x, (area).p1.y
-
 /* TCM_ALG_NAME must be defined to use the debug methods */

 #ifdef DEBUG
 #define IFDEBUG(x) x
 #else
+/* compile-check debug statements even if not DEBUG */
 #define IFDEBUG(x) do { if (0) x; } while (0)
 #endif

@@ -38,7 +38,8 @@
 #define P2(fmt, ...) P(KERN_INFO, fmt, ##__VA_ARGS__)
 #define P3(fmt, ...) P(KERN_DEBUG, fmt, ##__VA_ARGS__)

-#define PA(level, msg, p_area) P##level(msg " " AREA_FMT "\n", AREA(*(p_area)))
+#define PA(level, msg, p_area) P##level(msg " (%03d %03d)-(%03d %03d)\n", \
+        (p_area)->p0.x, (p_area)->p0.y, (p_area)->p1.x, (p_area)->p1.y)

 /* assign coordinates to area */
 static inline
@@ -50,10 +51,4 @@ void assign(struct tcm_area *a, u16 x0, u16 y0, u16 x1, u16 y1)
         a->p1.y = y1;
 }

-static inline
-void dump_area(struct tcm_area *area)
-{
-        printk(KERN_NOTICE AREA_FMT "\n", AREA(*area));
-}
-
 #endif
diff --git a/drivers/media/video/tiler/tmm-pat.c b/drivers/media/video/tiler/tmm-pat.c
index c3c86e582db..0e50fa35912 100644
--- a/drivers/media/video/tiler/tmm-pat.c
+++ b/drivers/media/video/tiler/tmm-pat.c
@@ -3,6 +3,8 @@
  *
  * DMM driver support functions for TI OMAP processors.
  *
+ * Author: Lajos Molnar <molnar@ti.com>, David Sin <dsin@ti.com>
+ *
  * Copyright (C) 2009-2010 Texas Instruments, Inc.
  *
  * This package is free software; you can redistribute it and/or modify
@@ -24,246 +26,195 @@

 #include "tmm.h"

-/**
- * Number of pages to allocate when
- * refilling the free page stack.
- */
-#define MAX 16
-#define DMM_PAGE 0x1000
-
-/* Max pages in free page stack */
-#define PAGE_CAP (256 * 40)
+/* Memory limit to cache free pages. TILER will eventually use this much */
+static u32 cache_limit = (40 * 1024 * 1024);
+module_param_named(cache, cache_limit, uint, 0644);
+MODULE_PARM_DESC(cache, "Cache free pages if total memory is under this limit");

-/* Number of pages currently allocated */
-static unsigned long count;
+/* global state - statically initialized */
+static LIST_HEAD(free_list);    /* page cache: list of free pages */
+static u32 total_mem;           /* total memory allocated (free & used) */
+static u32 refs;                /* number of tmm_pat instances */
+static DEFINE_MUTEX(mtx);       /* global mutex */

-/**
- * Used to keep track of mem per
- * dmm_get_pages call.
- */
-struct fast {
+/* The page struct pointer and physical address of each page.*/
+struct mem {
         struct list_head list;
-        struct mem **mem;
-        u32 *pa;
-        u32 num;
+        struct page *pg;        /* page struct */
+        u32 pa;                 /* physical address */
 };

-/**
- * Used to keep track of the page struct ptrs
- * and physical addresses of each page.
- */
-struct mem {
+/* Used to keep track of mem per tmm_pat_get_pages call */
+struct fast {
         struct list_head list;
-        struct page *pg;
-        u32 pa;
+        struct mem **mem;       /* array of page info */
+        u32 *pa;                /* array of physical addresses */
+        u32 num;                /* number of pages */
 };

-/**
- * TMM PAT private structure
- */
+/* TMM PAT private structure */
 struct dmm_mem {
-        struct fast fast_list;
-        struct mem free_list;
-        struct mem used_list;
-        struct mutex mtx;
+        struct list_head fast_list;
         struct dmm *dmm;
 };

-static void dmm_free_fast_list(struct fast *fast)
+/**
+ * Frees pages in a fast structure. Moves pages to the free list if there
+ * are less pages used than max_to_keep. Otherwise, it frees the pages
+ */
+static void free_fast(struct fast *f)
 {
-        struct list_head *pos = NULL, *q = NULL;
-        struct fast *f = NULL;
         s32 i = 0;

         /* mutex is locked */
-        list_for_each_safe(pos, q, &fast->list) {
-                f = list_entry(pos, struct fast, list);
-                for (i = 0; i < f->num; i++)
+        for (i = 0; i < f->num; i++) {
+                if (total_mem < cache_limit) {
+                        /* cache free page if under the limit */
+                        list_add(&f->mem[i]->list, &free_list);
+                } else {
+                        /* otherwise, free */
+                        total_mem -= PAGE_SIZE;
                         __free_page(f->mem[i]->pg);
-                kfree(f->pa);
-                kfree(f->mem);
-                list_del(pos);
-                kfree(f);
+                }
         }
+        kfree(f->pa);
+        kfree(f->mem);
+        /* remove only if element was added */
+        if (f->list.next)
+                list_del(&f->list);
+        kfree(f);
 }

-static u32 fill_page_stack(struct mem *mem, struct mutex *mtx)
+/* allocate and flush a page */
+static struct mem *alloc_mem(void)
 {
-        s32 i = 0;
-        struct mem *m = NULL;
-
-        for (i = 0; i < MAX; i++) {
-                m = kmalloc(sizeof(*m), GFP_KERNEL);
-                if (!m)
-                        return -ENOMEM;
-                memset(m, 0x0, sizeof(*m));
-
-                m->pg = alloc_page(GFP_KERNEL | GFP_DMA);
-                if (!m->pg) {
-                        kfree(m);
-                        return -ENOMEM;
-                }
+        struct mem *m = kmalloc(sizeof(*m), GFP_KERNEL);
+        if (!m)
+                return NULL;
+        memset(m, 0, sizeof(*m));

-                m->pa = page_to_phys(m->pg);
+        m->pg = alloc_page(GFP_KERNEL | GFP_DMA);
+        if (!m->pg) {
+                kfree(m);
+                return NULL;
+        }

-                /**
-                 * Note: we need to flush the cache
-                 * entry for each page we allocate.
-                 */
-                dmac_flush_range((void *)page_address(m->pg),
-                                (void *)page_address(m->pg) + DMM_PAGE);
-                outer_flush_range(m->pa, m->pa + DMM_PAGE);
+        m->pa = page_to_phys(m->pg);

-                mutex_lock(mtx);
-                count++;
-                list_add(&m->list, &mem->list);
-                mutex_unlock(mtx);
-        }
-        return 0x0;
+        /* flush the cache entry for each page we allocate. */
+        dmac_flush_range(page_address(m->pg),
+                         page_address(m->pg) + PAGE_SIZE);
+        outer_flush_range(m->pa, m->pa + PAGE_SIZE);
+
+        return m;
 }

-static void dmm_free_page_stack(struct mem *mem)
+static void free_page_cache(void)
 {
-        struct list_head *pos = NULL, *q = NULL;
-        struct mem *m = NULL;
+        struct mem *m, *m_;

         /* mutex is locked */
-        list_for_each_safe(pos, q, &mem->list) {
-                m = list_entry(pos, struct mem, list);
+        list_for_each_entry_safe(m, m_, &free_list, list) {
                 __free_page(m->pg);
-                list_del(pos);
+                total_mem -= PAGE_SIZE;
+                list_del(&m->list);
                 kfree(m);
         }
 }

 static void tmm_pat_deinit(struct tmm *tmm)
 {
+        struct fast *f, *f_;
         struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;

-        mutex_lock(&pvt->mtx);
-        dmm_free_fast_list(&pvt->fast_list);
-        dmm_free_page_stack(&pvt->free_list);
-        dmm_free_page_stack(&pvt->used_list);
-        mutex_destroy(&pvt->mtx);
+        mutex_lock(&mtx);
+
+        /* free all outstanding used memory */
+        list_for_each_entry_safe(f, f_, &pvt->fast_list, list)
+                free_fast(f);
+
+        /* if this is the last tmm_pat, free all memory */
+        if (--refs == 0)
+                free_page_cache();
+
+        mutex_unlock(&mtx);
 }

-static u32 *tmm_pat_get_pages(struct tmm *tmm, s32 n)
+static u32 *tmm_pat_get_pages(struct tmm *tmm, u32 n)
 {
-        s32 i = 0;
-        struct list_head *pos = NULL, *q = NULL;
-        struct mem *m = NULL;
-        struct fast *f = NULL;
+        struct mem *m;
+        struct fast *f;
         struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;

-        if (n <= 0 || n > 0x8000)
-                return NULL;
-
-        if (list_empty_careful(&pvt->free_list.list))
-                if (fill_page_stack(&pvt->free_list, &pvt->mtx))
-                        return NULL;
-
         f = kmalloc(sizeof(*f), GFP_KERNEL);
         if (!f)
                 return NULL;
-        memset(f, 0x0, sizeof(*f));
+        memset(f, 0, sizeof(*f));

         /* array of mem struct pointers */
         f->mem = kmalloc(n * sizeof(*f->mem), GFP_KERNEL);
-        if (!f->mem) {
-                kfree(f); return NULL;
-        }
-        memset(f->mem, 0x0, n * sizeof(*f->mem));

         /* array of physical addresses */
         f->pa = kmalloc(n * sizeof(*f->pa), GFP_KERNEL);
-        if (!f->pa) {
-                kfree(f->mem); kfree(f); return NULL;
-        }
-        memset(f->pa, 0x0, n * sizeof(*f->pa));

-        /*
-         * store the number of mem structs so that we
-         * know how many to free later.
-         */
-        f->num = n;
+        /* no pages have been allocated yet (needed for cleanup) */
+        f->num = 0;
+
+        if (!f->mem || !f->pa)
+                goto cleanup;
+
+        memset(f->mem, 0, n * sizeof(*f->mem));
+        memset(f->pa, 0, n * sizeof(*f->pa));

-        for (i = 0; i < n; i++) {
-                if (list_empty_careful(&pvt->free_list.list))
-                        if (fill_page_stack(&pvt->free_list, &pvt->mtx))
+        /* fill out fast struct mem array with free pages */
+        mutex_lock(&mtx);
+        while (f->num < n) {
+                /* if there is a free cached page use it */
+                if (!list_empty(&free_list)) {
+                        /* unbind first element from list */
+                        m = list_first_entry(&free_list, typeof(*m), list);
+                        list_del(&m->list);
+                } else {
+                        mutex_unlock(&mtx);
+
+                        /**
+                         * Unlock mutex during allocation and cache flushing.
+                         */
+                        m = alloc_mem();
+                        if (!m)
                                 goto cleanup;

-                mutex_lock(&pvt->mtx);
-                pos = NULL;
-                q = NULL;
-
-                /*
-                 * remove one mem struct from the free list and
-                 * add the address to the fast struct mem array
-                 */
-                list_for_each_safe(pos, q, &pvt->free_list.list) {
-                        m = list_entry(pos, struct mem, list);
-                        f->mem[i] = m;
-                        list_del(pos);
-                        break;
+                        mutex_lock(&mtx);
+                        total_mem += PAGE_SIZE;
                 }
-                mutex_unlock(&pvt->mtx);

-                if (m != NULL)
-                        f->pa[i] = m->pa;
-                else
-                        goto cleanup;
+                f->mem[f->num] = m;
+                f->pa[f->num++] = m->pa;
         }

-        mutex_lock(&pvt->mtx);
-        list_add(&f->list, &pvt->fast_list.list);
-        mutex_unlock(&pvt->mtx);
+        list_add(&f->list, &pvt->fast_list);
+        mutex_unlock(&mtx);
+        return f->pa;

-        if (f != NULL)
-                return f->pa;
 cleanup:
-        for (; i > 0; i--) {
-                mutex_lock(&pvt->mtx);
-                list_add(&f->mem[i - 1]->list, &pvt->free_list.list);
-                mutex_unlock(&pvt->mtx);
-        }
-        kfree(f->pa);
-        kfree(f->mem);
-        kfree(f);
+        free_fast(f);
         return NULL;
 }

-static void tmm_pat_free_pages(struct tmm *tmm, u32 *list)
+static void tmm_pat_free_pages(struct tmm *tmm, u32 *page_list)
 {
         struct dmm_mem *pvt = (struct dmm_mem *) tmm->pvt;
-        struct list_head *pos = NULL, *q = NULL;
-        struct fast *f = NULL;
-        s32 i = 0;
+        struct fast *f, *f_;

-        mutex_lock(&pvt->mtx);
-        pos = NULL;
-        q = NULL;
-        list_for_each_safe(pos, q, &pvt->fast_list.list) {
-                f = list_entry(pos, struct fast, list);
-                if (f->pa[0] == list[0]) {
-                        for (i = 0; i < f->num; i++) {
-                                if (count < PAGE_CAP) {
-                                        list_add(
-                                        &((struct mem *)f->mem[i])->list,
-                                        &pvt->free_list.list);
-                                } else {
-                                        __free_page(
-                                        ((struct mem *)f->mem[i])->pg);
-                                        count--;
-                                }
-                        }
-                        list_del(pos);
-                        kfree(f->pa);
-                        kfree(f->mem);
-                        kfree(f);
+        mutex_lock(&mtx);
+        /* find fast struct based on 1st page */
+        list_for_each_entry_safe(f, f_, &pvt->fast_list, list) {
+                if (f->pa[0] == page_list[0]) {
+                        free_fast(f);
                         break;
                 }
         }
-        mutex_unlock(&pvt->mtx);
+        mutex_unlock(&mtx);
 }

 static s32 tmm_pat_map(struct tmm *tmm, struct pat_area area, u32 page_pa)
@@ -298,15 +249,12 @@ struct tmm *tmm_pat_init(u32 pat_id)
         if (pvt) {
                 /* private data */
                 pvt->dmm = dmm;
-                INIT_LIST_HEAD(&pvt->free_list.list);
-                INIT_LIST_HEAD(&pvt->used_list.list);
-                INIT_LIST_HEAD(&pvt->fast_list.list);
-                mutex_init(&pvt->mtx);
+                INIT_LIST_HEAD(&pvt->fast_list);

-                count = 0;
-                if (list_empty_careful(&pvt->free_list.list))
-                        if (fill_page_stack(&pvt->free_list, &pvt->mtx))
-                                goto error;
+                /* increate tmm_pat references */
+                mutex_lock(&mtx);
+                refs++;
+                mutex_unlock(&mtx);

                 /* public data */
                 tmm->pvt = pvt;
@@ -319,7 +267,6 @@ struct tmm *tmm_pat_init(u32 pat_id)
         return tmm;
 }

-error:
         kfree(pvt);
         kfree(tmm);
         dmm_pat_release(dmm);
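One non-obvious idiom in the tmm-pat.c change above: free_fast() is shared by the normal free path and the allocation error path, and on the error path the fast struct may never have been linked into fast_list. Because the struct is zeroed right after kmalloc(), an unlinked node still has f->list.next == NULL, which is what this guard from the patch relies on:

        kfree(f->pa);
        kfree(f->mem);
        /* remove only if element was added (list.next stays NULL if unlinked) */
        if (f->list.next)
                list_del(&f->list);
        kfree(f);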
diff --git a/drivers/media/video/tiler/tmm.h b/drivers/media/video/tiler/tmm.h
index 80d7001c14b..31470b3e44d 100644
--- a/drivers/media/video/tiler/tmm.h
+++ b/drivers/media/video/tiler/tmm.h
@@ -3,6 +3,8 @@
  *
  * TMM interface definition for TI TILER.
  *
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
  * Copyright (C) 2009-2010 Texas Instruments, Inc.
  *
  * This package is free software; you can redistribute it and/or modify
@@ -24,7 +26,7 @@ struct tmm {
         void *pvt;

         /* function table */
-        u32 *(*get) (struct tmm *tmm, s32 num_pages);
+        u32 *(*get) (struct tmm *tmm, u32 num_pages);
         void (*free) (struct tmm *tmm, u32 *pages);
         s32 (*map) (struct tmm *tmm, struct pat_area area, u32 page_pa);
         void (*clear) (struct tmm *tmm, struct pat_area area);
@@ -36,7 +38,7 @@ struct tmm {
  * @return a pointer to a list of physical page addresses.
  */
 static inline
-u32 *tmm_get(struct tmm *tmm, s32 num_pages)
+u32 *tmm_get(struct tmm *tmm, u32 num_pages)
 {
         if (tmm && tmm->pvt)
                 return tmm->get(tmm, num_pages);