author     Timur Tabi <timur@freescale.com>          2007-05-08 15:46:36 -0400
committer  Kumar Gala <galak@kernel.crashing.org>    2007-05-10 00:01:43 -0400
commit     4c35630ccda56ed494f6102d2e147fefe14b78d2 (patch)
tree       4f04754fb0ec6978923b3c1e0318997e420f6551 /arch/powerpc/sysdev
parent     742226c579c573c24386aaf41969a01ee058b97e (diff)
[POWERPC] Change rheap functions to use ulongs instead of pointers
The rheap allocation functions return a pointer, but the actual value is based
on how the heap was initialized, and so it can be anything, e.g. an offset
into a buffer. A ulong is a better representation of the value returned by the
allocation functions.

This patch changes all of the relevant rheap functions to use unsigned long
integers instead of pointers. In case of an error, the value returned is a
negative error code that has been cast to an unsigned long. The caller can use
the IS_ERR_VALUE() macro to check for this.

All code which calls the rheap functions is updated accordingly. The macros
IS_MURAM_ERR() and IS_DPERR() have been deleted in favor of IS_ERR_VALUE().
Error checking has also been added to rh_attach_region().

Signed-off-by: Timur Tabi <timur@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
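For illustration, a minimal caller-side sketch of the new convention, modeled on the ucc_slow.c hunk below (the helper function name is hypothetical): allocators now return an unsigned long offset, and a failed allocation is a negative error code cast to unsigned long, so callers test it with IS_ERR_VALUE() from <linux/err.h> instead of the removed IS_MURAM_ERR()/IS_DPERR() macros.

#include <linux/err.h>

/* Hypothetical helper showing the new error-checking pattern. */
static int example_alloc_pram(struct ucc_slow_private *uccs)
{
	/* qe_muram_alloc() now returns an unsigned long offset into MURAM */
	uccs->us_pram_offset =
		qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);

	/* errors are negative errnos cast to unsigned long */
	if (IS_ERR_VALUE(uccs->us_pram_offset))
		return -ENOMEM;

	return 0;
}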
Diffstat (limited to 'arch/powerpc/sysdev')
-rw-r--r--  arch/powerpc/sysdev/commproc.c          20
-rw-r--r--  arch/powerpc/sysdev/cpm2_common.c       21
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe.c         29
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc_fast.c    5
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc_slow.c    7
5 files changed, 41 insertions(+), 41 deletions(-)
diff --git a/arch/powerpc/sysdev/commproc.c b/arch/powerpc/sysdev/commproc.c
index 9b4fafd9a840..4f67b89ba1d0 100644
--- a/arch/powerpc/sysdev/commproc.c
+++ b/arch/powerpc/sysdev/commproc.c
@@ -330,7 +330,7 @@ void m8xx_cpm_dpinit(void)
 	 * with the processor and the microcode patches applied / activated.
 	 * But the following should be at least safe.
 	 */
-	rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
+	rh_attach_region(&cpm_dpmem_info, CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
 }
 
 /*
@@ -338,9 +338,9 @@ void m8xx_cpm_dpinit(void)
  * This function returns an offset into the DPRAM area.
  * Use cpm_dpram_addr() to get the virtual address of the area.
  */
-uint cpm_dpalloc(uint size, uint align)
+unsigned long cpm_dpalloc(uint size, uint align)
 {
-	void *start;
+	unsigned long start;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cpm_dpmem_lock, flags);
@@ -352,30 +352,30 @@ uint cpm_dpalloc(uint size, uint align)
 }
 EXPORT_SYMBOL(cpm_dpalloc);
 
-int cpm_dpfree(uint offset)
+int cpm_dpfree(unsigned long offset)
 {
 	int ret;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cpm_dpmem_lock, flags);
-	ret = rh_free(&cpm_dpmem_info, (void *)offset);
+	ret = rh_free(&cpm_dpmem_info, offset);
 	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
 
 	return ret;
 }
 EXPORT_SYMBOL(cpm_dpfree);
 
-uint cpm_dpalloc_fixed(uint offset, uint size, uint align)
+unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align)
 {
-	void *start;
+	unsigned long start;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cpm_dpmem_lock, flags);
 	cpm_dpmem_info.alignment = align;
-	start = rh_alloc_fixed(&cpm_dpmem_info, (void *)offset, size, "commproc");
+	start = rh_alloc_fixed(&cpm_dpmem_info, offset, size, "commproc");
 	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
 
-	return (uint)start;
+	return start;
 }
 EXPORT_SYMBOL(cpm_dpalloc_fixed);
 
@@ -385,7 +385,7 @@ void cpm_dpdump(void)
 }
 EXPORT_SYMBOL(cpm_dpdump);
 
-void *cpm_dpram_addr(uint offset)
+void *cpm_dpram_addr(unsigned long offset)
 {
 	return (void *)(dpram_vbase + offset);
 }
diff --git a/arch/powerpc/sysdev/cpm2_common.c b/arch/powerpc/sysdev/cpm2_common.c
index ec265995d5d8..924412974795 100644
--- a/arch/powerpc/sysdev/cpm2_common.c
+++ b/arch/powerpc/sysdev/cpm2_common.c
@@ -248,15 +248,14 @@ static void cpm2_dpinit(void)
 	 * varies with the processor and the microcode patches activated.
 	 * But the following should be at least safe.
 	 */
-	rh_attach_region(&cpm_dpmem_info, (void *)CPM_DATAONLY_BASE,
-			CPM_DATAONLY_SIZE);
+	rh_attach_region(&cpm_dpmem_info, CPM_DATAONLY_BASE, CPM_DATAONLY_SIZE);
 }
 
 /* This function returns an index into the DPRAM area.
  */
-uint cpm_dpalloc(uint size, uint align)
+unsigned long cpm_dpalloc(uint size, uint align)
 {
-	void *start;
+	unsigned long start;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cpm_dpmem_lock, flags);
@@ -268,13 +267,13 @@ uint cpm_dpalloc(uint size, uint align)
 }
 EXPORT_SYMBOL(cpm_dpalloc);
 
-int cpm_dpfree(uint offset)
+int cpm_dpfree(unsigned long offset)
 {
 	int ret;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cpm_dpmem_lock, flags);
-	ret = rh_free(&cpm_dpmem_info, (void *)offset);
+	ret = rh_free(&cpm_dpmem_info, offset);
 	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
 
 	return ret;
@@ -282,17 +281,17 @@ int cpm_dpfree(uint offset)
 EXPORT_SYMBOL(cpm_dpfree);
 
 /* not sure if this is ever needed */
-uint cpm_dpalloc_fixed(uint offset, uint size, uint align)
+unsigned long cpm_dpalloc_fixed(unsigned long offset, uint size, uint align)
 {
-	void *start;
+	unsigned long start;
 	unsigned long flags;
 
 	spin_lock_irqsave(&cpm_dpmem_lock, flags);
 	cpm_dpmem_info.alignment = align;
-	start = rh_alloc_fixed(&cpm_dpmem_info, (void *)offset, size, "commproc");
+	start = rh_alloc_fixed(&cpm_dpmem_info, offset, size, "commproc");
 	spin_unlock_irqrestore(&cpm_dpmem_lock, flags);
 
-	return (uint)start;
+	return start;
 }
 EXPORT_SYMBOL(cpm_dpalloc_fixed);
 
@@ -302,7 +301,7 @@ void cpm_dpdump(void)
 }
 EXPORT_SYMBOL(cpm_dpdump);
 
-void *cpm_dpram_addr(uint offset)
+void *cpm_dpram_addr(unsigned long offset)
 {
 	return (void *)(im_dprambase + offset);
 }
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 7f4c07543961..90f87408b5d5 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -244,7 +244,7 @@ EXPORT_SYMBOL(qe_put_snum);
 static int qe_sdma_init(void)
 {
 	struct sdma *sdma = &qe_immr->sdma;
-	u32 sdma_buf_offset;
+	unsigned long sdma_buf_offset;
 
 	if (!sdma)
 		return -ENODEV;
@@ -252,10 +252,10 @@ static int qe_sdma_init(void)
 	/* allocate 2 internal temporary buffers (512 bytes size each) for
 	 * the SDMA */
 	sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
-	if (IS_MURAM_ERR(sdma_buf_offset))
+	if (IS_ERR_VALUE(sdma_buf_offset))
 		return -ENOMEM;
 
-	out_be32(&sdma->sdebcr, sdma_buf_offset & QE_SDEBCR_BA_MASK);
+	out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
 	out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
 			(0x1 << QE_SDMR_CEN_SHIFT)));
 
@@ -291,33 +291,32 @@ static void qe_muram_init(void)
 	if ((np = of_find_node_by_name(NULL, "data-only")) != NULL) {
 		address = *of_get_address(np, 0, &size, &flags);
 		of_node_put(np);
-		rh_attach_region(&qe_muram_info,
-			(void *)address, (int)size);
+		rh_attach_region(&qe_muram_info, address, (int) size);
 	}
 }
 
 /* This function returns an index into the MURAM area.
  */
-u32 qe_muram_alloc(u32 size, u32 align)
+unsigned long qe_muram_alloc(int size, int align)
 {
-	void *start;
+	unsigned long start;
 	unsigned long flags;
 
 	spin_lock_irqsave(&qe_muram_lock, flags);
 	start = rh_alloc_align(&qe_muram_info, size, align, "QE");
 	spin_unlock_irqrestore(&qe_muram_lock, flags);
 
-	return (u32) start;
+	return start;
 }
 EXPORT_SYMBOL(qe_muram_alloc);
 
-int qe_muram_free(u32 offset)
+int qe_muram_free(unsigned long offset)
 {
 	int ret;
 	unsigned long flags;
 
 	spin_lock_irqsave(&qe_muram_lock, flags);
-	ret = rh_free(&qe_muram_info, (void *)offset);
+	ret = rh_free(&qe_muram_info, offset);
 	spin_unlock_irqrestore(&qe_muram_lock, flags);
 
 	return ret;
@@ -325,16 +324,16 @@ int qe_muram_free(u32 offset)
 EXPORT_SYMBOL(qe_muram_free);
 
 /* not sure if this is ever needed */
-u32 qe_muram_alloc_fixed(u32 offset, u32 size)
+unsigned long qe_muram_alloc_fixed(unsigned long offset, int size)
 {
-	void *start;
+	unsigned long start;
 	unsigned long flags;
 
 	spin_lock_irqsave(&qe_muram_lock, flags);
-	start = rh_alloc_fixed(&qe_muram_info, (void *)offset, size, "commproc");
+	start = rh_alloc_fixed(&qe_muram_info, offset, size, "commproc");
 	spin_unlock_irqrestore(&qe_muram_lock, flags);
 
-	return (u32) start;
+	return start;
 }
 EXPORT_SYMBOL(qe_muram_alloc_fixed);
 
@@ -344,7 +343,7 @@ void qe_muram_dump(void)
 }
 EXPORT_SYMBOL(qe_muram_dump);
 
-void *qe_muram_addr(u32 offset)
+void *qe_muram_addr(unsigned long offset)
 {
 	return (void *)&qe_immr->muram[offset];
 }
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_fast.c b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
index 66137bf2dfb0..9143236853fc 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_fast.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/stddef.h>
 #include <linux/interrupt.h>
+#include <linux/err.h>
 
 #include <asm/io.h>
 #include <asm/immap_qe.h>
@@ -268,7 +269,7 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
 	/* Allocate memory for Tx Virtual Fifo */
 	uccf->ucc_fast_tx_virtual_fifo_base_offset =
 	    qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
-	if (IS_MURAM_ERR(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
+	if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
 		printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO", __FUNCTION__);
 		uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
 		ucc_fast_free(uccf);
@@ -280,7 +281,7 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
 	    qe_muram_alloc(uf_info->urfs +
 			   UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
 			   UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
-	if (IS_MURAM_ERR(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
+	if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
 		printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO", __FUNCTION__);
 		uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
 		ucc_fast_free(uccf);
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_slow.c b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
index b930d686a4d1..1f65c26ce63f 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_slow.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
@@ -18,6 +18,7 @@
 #include <linux/slab.h>
 #include <linux/stddef.h>
 #include <linux/interrupt.h>
+#include <linux/err.h>
 
 #include <asm/io.h>
 #include <asm/immap_qe.h>
@@ -175,7 +176,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 	/* Get PRAM base */
 	uccs->us_pram_offset =
 	    qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
-	if (IS_MURAM_ERR(uccs->us_pram_offset)) {
+	if (IS_ERR_VALUE(uccs->us_pram_offset)) {
 		printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __FUNCTION__);
 		ucc_slow_free(uccs);
 		return -ENOMEM;
@@ -210,7 +211,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 	uccs->rx_base_offset =
 	    qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
 			   QE_ALIGNMENT_OF_BD);
-	if (IS_MURAM_ERR(uccs->rx_base_offset)) {
+	if (IS_ERR_VALUE(uccs->rx_base_offset)) {
 		printk(KERN_ERR "%s: cannot allocate RX BDs", __FUNCTION__);
 		uccs->rx_base_offset = 0;
 		ucc_slow_free(uccs);
@@ -220,7 +221,7 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
 	uccs->tx_base_offset =
 	    qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
 			   QE_ALIGNMENT_OF_BD);
-	if (IS_MURAM_ERR(uccs->tx_base_offset)) {
+	if (IS_ERR_VALUE(uccs->tx_base_offset)) {
 		printk(KERN_ERR "%s: cannot allocate TX BDs", __FUNCTION__);
 		uccs->tx_base_offset = 0;
 		ucc_slow_free(uccs);