diff options
author | Ernesto Ramos <ernesto@ti.com> | 2010-07-28 10:45:24 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@suse.de> | 2010-07-28 11:22:41 -0400 |
commit | e6486d8cee8d5be063cc2971ad274eb90f27e18c (patch) | |
tree | 4423176ce327d7120184fbe2503fb20347ca2eae | |
parent | ecd3d0ca4006f86a542bfd5ed1277cac6b2df3be (diff) |
staging:ti dspbridge: remove DSP_SUCCEEDED macro from core
Since a successful status is now 0, the DSP_SUCCEEDED
macro is no longer necessary.
Signed-off-by: Ernesto Ramos <ernesto@ti.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
-rw-r--r-- | drivers/staging/tidspbridge/core/chnl_sm.c | 22 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/dsp-clock.c | 4 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/io_sm.c | 47 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/msg_sm.c | 22 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/tiomap3430.c | 57 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/tiomap3430_pwr.c | 6 | ||||
-rw-r--r-- | drivers/staging/tidspbridge/core/tiomap_io.c | 41 |
7 files changed, 91 insertions, 108 deletions
diff --git a/drivers/staging/tidspbridge/core/chnl_sm.c b/drivers/staging/tidspbridge/core/chnl_sm.c index 69c4784338f4..1b231414589d 100644 --- a/drivers/staging/tidspbridge/core/chnl_sm.c +++ b/drivers/staging/tidspbridge/core/chnl_sm.c | |||
@@ -170,7 +170,7 @@ func_cont: | |||
170 | omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX); | 170 | omap_mbox_disable_irq(dev_ctxt->mbox, IRQ_RX); |
171 | if (pchnl->chnl_type == CHNL_PCPY) { | 171 | if (pchnl->chnl_type == CHNL_PCPY) { |
172 | /* This is a processor-copy channel. */ | 172 | /* This is a processor-copy channel. */ |
173 | if (DSP_SUCCEEDED(status) && CHNL_IS_OUTPUT(pchnl->chnl_mode)) { | 173 | if (!status && CHNL_IS_OUTPUT(pchnl->chnl_mode)) { |
174 | /* Check buffer size on output channels for fit. */ | 174 | /* Check buffer size on output channels for fit. */ |
175 | if (byte_size > | 175 | if (byte_size > |
176 | io_buf_size(pchnl->chnl_mgr_obj->hio_mgr)) | 176 | io_buf_size(pchnl->chnl_mgr_obj->hio_mgr)) |
@@ -178,7 +178,7 @@ func_cont: | |||
178 | 178 | ||
179 | } | 179 | } |
180 | } | 180 | } |
181 | if (DSP_SUCCEEDED(status)) { | 181 | if (!status) { |
182 | /* Get a free chirp: */ | 182 | /* Get a free chirp: */ |
183 | chnl_packet_obj = | 183 | chnl_packet_obj = |
184 | (struct chnl_irp *)lst_get_head(pchnl->free_packets_list); | 184 | (struct chnl_irp *)lst_get_head(pchnl->free_packets_list); |
@@ -186,7 +186,7 @@ func_cont: | |||
186 | status = -EIO; | 186 | status = -EIO; |
187 | 187 | ||
188 | } | 188 | } |
189 | if (DSP_SUCCEEDED(status)) { | 189 | if (!status) { |
190 | /* Enqueue the chirp on the chnl's IORequest queue: */ | 190 | /* Enqueue the chirp on the chnl's IORequest queue: */ |
191 | chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf = | 191 | chnl_packet_obj->host_user_buf = chnl_packet_obj->host_sys_buf = |
192 | host_buf; | 192 | host_buf; |
@@ -330,7 +330,7 @@ int bridge_chnl_close(struct chnl_object *chnl_obj) | |||
330 | status = bridge_chnl_cancel_io(chnl_obj); | 330 | status = bridge_chnl_cancel_io(chnl_obj); |
331 | } | 331 | } |
332 | func_cont: | 332 | func_cont: |
333 | if (DSP_SUCCEEDED(status)) { | 333 | if (!status) { |
334 | /* Assert I/O on this channel is now cancelled: Protects | 334 | /* Assert I/O on this channel is now cancelled: Protects |
335 | * from io_dpc. */ | 335 | * from io_dpc. */ |
336 | DBC_ASSERT((pchnl->dw_state & CHNL_STATECANCEL)); | 336 | DBC_ASSERT((pchnl->dw_state & CHNL_STATECANCEL)); |
@@ -420,8 +420,7 @@ int bridge_chnl_create(struct chnl_mgr **channel_mgr, | |||
420 | chnl_mgr_obj->dw_output_mask = 0; | 420 | chnl_mgr_obj->dw_output_mask = 0; |
421 | chnl_mgr_obj->dw_last_output = 0; | 421 | chnl_mgr_obj->dw_last_output = 0; |
422 | chnl_mgr_obj->hdev_obj = hdev_obj; | 422 | chnl_mgr_obj->hdev_obj = hdev_obj; |
423 | if (DSP_SUCCEEDED(status)) | 423 | spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock); |
424 | spin_lock_init(&chnl_mgr_obj->chnl_mgr_lock); | ||
425 | } else { | 424 | } else { |
426 | status = -ENOMEM; | 425 | status = -ENOMEM; |
427 | } | 426 | } |
@@ -499,7 +498,7 @@ int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout) | |||
499 | } else { | 498 | } else { |
500 | status = -EFAULT; | 499 | status = -EFAULT; |
501 | } | 500 | } |
502 | if (DSP_SUCCEEDED(status)) { | 501 | if (!status) { |
503 | /* Note: Currently, if another thread continues to add IO | 502 | /* Note: Currently, if another thread continues to add IO |
504 | * requests to this channel, this function will continue to | 503 | * requests to this channel, this function will continue to |
505 | * flush all such queued IO requests. */ | 504 | * flush all such queued IO requests. */ |
@@ -507,8 +506,7 @@ int bridge_chnl_flush_io(struct chnl_object *chnl_obj, u32 timeout) | |||
507 | && (pchnl->chnl_type == CHNL_PCPY)) { | 506 | && (pchnl->chnl_type == CHNL_PCPY)) { |
508 | /* Wait for IO completions, up to the specified | 507 | /* Wait for IO completions, up to the specified |
509 | * timeout: */ | 508 | * timeout: */ |
510 | while (!LST_IS_EMPTY(pchnl->pio_requests) && | 509 | while (!LST_IS_EMPTY(pchnl->pio_requests) && !status) { |
511 | DSP_SUCCEEDED(status)) { | ||
512 | status = bridge_chnl_get_ioc(chnl_obj, | 510 | status = bridge_chnl_get_ioc(chnl_obj, |
513 | timeout, &chnl_ioc_obj); | 511 | timeout, &chnl_ioc_obj); |
514 | if (DSP_FAILED(status)) | 512 | if (DSP_FAILED(status)) |
@@ -833,7 +831,7 @@ int bridge_chnl_open(struct chnl_object **chnl, | |||
833 | else | 831 | else |
834 | status = -ENOMEM; | 832 | status = -ENOMEM; |
835 | 833 | ||
836 | if (DSP_SUCCEEDED(status)) { | 834 | if (!status) { |
837 | pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object), | 835 | pchnl->ntfy_obj = kmalloc(sizeof(struct ntfy_object), |
838 | GFP_KERNEL); | 836 | GFP_KERNEL); |
839 | if (pchnl->ntfy_obj) | 837 | if (pchnl->ntfy_obj) |
@@ -842,7 +840,7 @@ int bridge_chnl_open(struct chnl_object **chnl, | |||
842 | status = -ENOMEM; | 840 | status = -ENOMEM; |
843 | } | 841 | } |
844 | 842 | ||
845 | if (DSP_SUCCEEDED(status)) { | 843 | if (!status) { |
846 | if (pchnl->pio_completions && pchnl->pio_requests && | 844 | if (pchnl->pio_completions && pchnl->pio_requests && |
847 | pchnl->free_packets_list) { | 845 | pchnl->free_packets_list) { |
848 | /* Initialize CHNL object fields: */ | 846 | /* Initialize CHNL object fields: */ |
@@ -897,7 +895,7 @@ int bridge_chnl_open(struct chnl_object **chnl, | |||
897 | *chnl = pchnl; | 895 | *chnl = pchnl; |
898 | } | 896 | } |
899 | func_end: | 897 | func_end: |
900 | DBC_ENSURE((DSP_SUCCEEDED(status) && pchnl) || (*chnl == NULL)); | 898 | DBC_ENSURE((!status && pchnl) || (*chnl == NULL)); |
901 | return status; | 899 | return status; |
902 | } | 900 | } |
903 | 901 | ||
diff --git a/drivers/staging/tidspbridge/core/dsp-clock.c b/drivers/staging/tidspbridge/core/dsp-clock.c index b474e833d1ae..5b1a0c5bb143 100644 --- a/drivers/staging/tidspbridge/core/dsp-clock.c +++ b/drivers/staging/tidspbridge/core/dsp-clock.c | |||
@@ -285,7 +285,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id) | |||
285 | status = -EPERM; | 285 | status = -EPERM; |
286 | } | 286 | } |
287 | 287 | ||
288 | if (DSP_SUCCEEDED(status)) | 288 | if (!status) |
289 | set_dsp_clk_active(&dsp_clocks, clk_id); | 289 | set_dsp_clk_active(&dsp_clocks, clk_id); |
290 | 290 | ||
291 | out: | 291 | out: |
@@ -354,7 +354,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id) | |||
354 | status = -EPERM; | 354 | status = -EPERM; |
355 | } | 355 | } |
356 | 356 | ||
357 | if (DSP_SUCCEEDED(status)) | 357 | if (!status) |
358 | set_dsp_clk_inactive(&dsp_clocks, clk_id); | 358 | set_dsp_clk_inactive(&dsp_clocks, clk_id); |
359 | 359 | ||
360 | out: | 360 | out: |
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c index e5e368497e05..1d433a93b4e3 100644 --- a/drivers/staging/tidspbridge/core/io_sm.c +++ b/drivers/staging/tidspbridge/core/io_sm.c | |||
@@ -230,11 +230,10 @@ int bridge_io_create(struct io_mgr **io_man, | |||
230 | 230 | ||
231 | spin_lock_init(&pio_mgr->dpc_lock); | 231 | spin_lock_init(&pio_mgr->dpc_lock); |
232 | 232 | ||
233 | if (DSP_SUCCEEDED(status)) | 233 | status = dev_get_dev_node(hdev_obj, &dev_node_obj); |
234 | status = dev_get_dev_node(hdev_obj, &dev_node_obj); | ||
235 | } | 234 | } |
236 | 235 | ||
237 | if (DSP_SUCCEEDED(status)) { | 236 | if (!status) { |
238 | pio_mgr->hbridge_context = hbridge_context; | 237 | pio_mgr->hbridge_context = hbridge_context; |
239 | pio_mgr->shared_irq = mgr_attrts->irq_shared; | 238 | pio_mgr->shared_irq = mgr_attrts->irq_shared; |
240 | if (dsp_wdt_init()) | 239 | if (dsp_wdt_init()) |
@@ -378,15 +377,13 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr) | |||
378 | dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n", | 377 | dev_dbg(bridge, "%s: (proc)proccopy shmmem size: 0x%x bytes\n", |
379 | __func__, (ul_shm_length - sizeof(struct shm))); | 378 | __func__, (ul_shm_length - sizeof(struct shm))); |
380 | 379 | ||
381 | if (DSP_SUCCEEDED(status)) { | 380 | /* Get start and length of message part of shared memory */ |
382 | /* Get start and length of message part of shared memory */ | 381 | status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM, |
383 | status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_BASE_SYM, | ||
384 | &ul_msg_base); | 382 | &ul_msg_base); |
385 | } | 383 | if (!status) { |
386 | if (DSP_SUCCEEDED(status)) { | ||
387 | status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM, | 384 | status = cod_get_sym_value(cod_man, MSG_SHARED_BUFFER_LIMIT_SYM, |
388 | &ul_msg_limit); | 385 | &ul_msg_limit); |
389 | if (DSP_SUCCEEDED(status)) { | 386 | if (!status) { |
390 | if (ul_msg_limit <= ul_msg_base) { | 387 | if (ul_msg_limit <= ul_msg_base) { |
391 | status = -EINVAL; | 388 | status = -EINVAL; |
392 | } else { | 389 | } else { |
@@ -409,7 +406,7 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr) | |||
409 | } else { | 406 | } else { |
410 | status = -EFAULT; | 407 | status = -EFAULT; |
411 | } | 408 | } |
412 | if (DSP_SUCCEEDED(status)) { | 409 | if (!status) { |
413 | #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG) | 410 | #if defined(CONFIG_TIDSPBRIDGE_BACKTRACE) || defined(CONFIG_TIDSPBRIDGE_DEBUG) |
414 | status = | 411 | status = |
415 | cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end); | 412 | cod_get_sym_value(cod_man, DSP_TRACESEC_END, &shm0_end); |
@@ -420,18 +417,18 @@ int bridge_io_on_loaded(struct io_mgr *hio_mgr) | |||
420 | if (DSP_FAILED(status)) | 417 | if (DSP_FAILED(status)) |
421 | status = -EFAULT; | 418 | status = -EFAULT; |
422 | } | 419 | } |
423 | if (DSP_SUCCEEDED(status)) { | 420 | if (!status) { |
424 | status = | 421 | status = |
425 | cod_get_sym_value(cod_man, DYNEXTBASE, &ul_dyn_ext_base); | 422 | cod_get_sym_value(cod_man, DYNEXTBASE, &ul_dyn_ext_base); |
426 | if (DSP_FAILED(status)) | 423 | if (DSP_FAILED(status)) |
427 | status = -EFAULT; | 424 | status = -EFAULT; |
428 | } | 425 | } |
429 | if (DSP_SUCCEEDED(status)) { | 426 | if (!status) { |
430 | status = cod_get_sym_value(cod_man, EXTEND, &ul_ext_end); | 427 | status = cod_get_sym_value(cod_man, EXTEND, &ul_ext_end); |
431 | if (DSP_FAILED(status)) | 428 | if (DSP_FAILED(status)) |
432 | status = -EFAULT; | 429 | status = -EFAULT; |
433 | } | 430 | } |
434 | if (DSP_SUCCEEDED(status)) { | 431 | if (!status) { |
435 | /* Get memory reserved in host resources */ | 432 | /* Get memory reserved in host resources */ |
436 | (void)mgr_enum_processor_info(0, (struct dsp_processorinfo *) | 433 | (void)mgr_enum_processor_info(0, (struct dsp_processorinfo *) |
437 | &hio_mgr->ext_proc_info, | 434 | &hio_mgr->ext_proc_info, |
@@ -1551,7 +1548,7 @@ static int register_shm_segs(struct io_mgr *hio_mgr, | |||
1551 | goto func_end; | 1548 | goto func_end; |
1552 | } | 1549 | } |
1553 | /* Get end of 1st SM Heap region */ | 1550 | /* Get end of 1st SM Heap region */ |
1554 | if (DSP_SUCCEEDED(status)) { | 1551 | if (!status) { |
1555 | /* Get start and length of message part of shared memory */ | 1552 | /* Get start and length of message part of shared memory */ |
1556 | status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM, | 1553 | status = cod_get_sym_value(cod_man, SHM0_SHARED_END_SYM, |
1557 | &shm0_end); | 1554 | &shm0_end); |
@@ -1561,7 +1558,7 @@ static int register_shm_segs(struct io_mgr *hio_mgr, | |||
1561 | } | 1558 | } |
1562 | } | 1559 | } |
1563 | /* Start of Gpp reserved region */ | 1560 | /* Start of Gpp reserved region */ |
1564 | if (DSP_SUCCEEDED(status)) { | 1561 | if (!status) { |
1565 | /* Get start and length of message part of shared memory */ | 1562 | /* Get start and length of message part of shared memory */ |
1566 | status = | 1563 | status = |
1567 | cod_get_sym_value(cod_man, SHM0_SHARED_RESERVED_BASE_SYM, | 1564 | cod_get_sym_value(cod_man, SHM0_SHARED_RESERVED_BASE_SYM, |
@@ -1572,15 +1569,15 @@ static int register_shm_segs(struct io_mgr *hio_mgr, | |||
1572 | } | 1569 | } |
1573 | } | 1570 | } |
1574 | /* Register with CMM */ | 1571 | /* Register with CMM */ |
1575 | if (DSP_SUCCEEDED(status)) { | 1572 | if (!status) { |
1576 | status = dev_get_cmm_mgr(hio_mgr->hdev_obj, &hio_mgr->hcmm_mgr); | 1573 | status = dev_get_cmm_mgr(hio_mgr->hdev_obj, &hio_mgr->hcmm_mgr); |
1577 | if (DSP_SUCCEEDED(status)) { | 1574 | if (!status) { |
1578 | status = cmm_un_register_gppsm_seg(hio_mgr->hcmm_mgr, | 1575 | status = cmm_un_register_gppsm_seg(hio_mgr->hcmm_mgr, |
1579 | CMM_ALLSEGMENTS); | 1576 | CMM_ALLSEGMENTS); |
1580 | } | 1577 | } |
1581 | } | 1578 | } |
1582 | /* Register new SM region(s) */ | 1579 | /* Register new SM region(s) */ |
1583 | if (DSP_SUCCEEDED(status) && (shm0_end - ul_shm0_base) > 0) { | 1580 | if (!status && (shm0_end - ul_shm0_base) > 0) { |
1584 | /* Calc size (bytes) of SM the GPP can alloc from */ | 1581 | /* Calc size (bytes) of SM the GPP can alloc from */ |
1585 | ul_rsrvd_size = | 1582 | ul_rsrvd_size = |
1586 | (shm0_end - ul_shm0_rsrvd_start + 1) * hio_mgr->word_size; | 1583 | (shm0_end - ul_shm0_rsrvd_start + 1) * hio_mgr->word_size; |
@@ -1843,11 +1840,11 @@ int print_dsp_trace_buffer(struct bridge_dev_context *hbridge_context) | |||
1843 | } else { | 1840 | } else { |
1844 | status = -EFAULT; | 1841 | status = -EFAULT; |
1845 | } | 1842 | } |
1846 | if (DSP_SUCCEEDED(status)) | 1843 | if (!status) |
1847 | status = | 1844 | status = |
1848 | cod_get_sym_value(cod_mgr, COD_TRACEEND, &ul_trace_end); | 1845 | cod_get_sym_value(cod_mgr, COD_TRACEEND, &ul_trace_end); |
1849 | 1846 | ||
1850 | if (DSP_SUCCEEDED(status)) | 1847 | if (!status) |
1851 | /* trace_cur_pos will hold the address of a DSP pointer */ | 1848 | /* trace_cur_pos will hold the address of a DSP pointer */ |
1852 | status = cod_get_sym_value(cod_mgr, COD_TRACECURPOS, | 1849 | status = cod_get_sym_value(cod_mgr, COD_TRACECURPOS, |
1853 | &trace_cur_pos); | 1850 | &trace_cur_pos); |
@@ -2013,7 +2010,7 @@ int dump_dsp_stack(struct bridge_dev_context *bridge_context) | |||
2013 | status = -EFAULT; | 2010 | status = -EFAULT; |
2014 | } | 2011 | } |
2015 | 2012 | ||
2016 | if (DSP_SUCCEEDED(status)) { | 2013 | if (!status) { |
2017 | status = dev_get_node_manager(dev_object, &node_mgr); | 2014 | status = dev_get_node_manager(dev_object, &node_mgr); |
2018 | if (!node_mgr) { | 2015 | if (!node_mgr) { |
2019 | pr_debug("%s: Failed on dev_get_node_manager.\n", | 2016 | pr_debug("%s: Failed on dev_get_node_manager.\n", |
@@ -2022,7 +2019,7 @@ int dump_dsp_stack(struct bridge_dev_context *bridge_context) | |||
2022 | } | 2019 | } |
2023 | } | 2020 | } |
2024 | 2021 | ||
2025 | if (DSP_SUCCEEDED(status)) { | 2022 | if (!status) { |
2026 | /* Look for SYS_PUTCBEG/SYS_PUTCEND: */ | 2023 | /* Look for SYS_PUTCBEG/SYS_PUTCEND: */ |
2027 | status = | 2024 | status = |
2028 | cod_get_sym_value(code_mgr, COD_TRACEBEG, &trace_begin); | 2025 | cod_get_sym_value(code_mgr, COD_TRACEBEG, &trace_begin); |
@@ -2032,7 +2029,7 @@ int dump_dsp_stack(struct bridge_dev_context *bridge_context) | |||
2032 | pr_debug("%s: Failed on cod_get_sym_value.\n", | 2029 | pr_debug("%s: Failed on cod_get_sym_value.\n", |
2033 | __func__); | 2030 | __func__); |
2034 | } | 2031 | } |
2035 | if (DSP_SUCCEEDED(status)) | 2032 | if (!status) |
2036 | status = dev_get_intf_fxns(dev_object, &intf_fxns); | 2033 | status = dev_get_intf_fxns(dev_object, &intf_fxns); |
2037 | /* | 2034 | /* |
2038 | * Check for the "magic number" in the trace buffer. If it has | 2035 | * Check for the "magic number" in the trace buffer. If it has |
@@ -2041,7 +2038,7 @@ int dump_dsp_stack(struct bridge_dev_context *bridge_context) | |||
2041 | */ | 2038 | */ |
2042 | mmu_fault_dbg_info.head[0] = 0; | 2039 | mmu_fault_dbg_info.head[0] = 0; |
2043 | mmu_fault_dbg_info.head[1] = 0; | 2040 | mmu_fault_dbg_info.head[1] = 0; |
2044 | if (DSP_SUCCEEDED(status)) { | 2041 | if (!status) { |
2045 | poll_cnt = 0; | 2042 | poll_cnt = 0; |
2046 | while ((mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 || | 2043 | while ((mmu_fault_dbg_info.head[0] != MMU_FAULT_HEAD1 || |
2047 | mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) && | 2044 | mmu_fault_dbg_info.head[1] != MMU_FAULT_HEAD2) && |
@@ -2066,7 +2063,7 @@ int dump_dsp_stack(struct bridge_dev_context *bridge_context) | |||
2066 | } | 2063 | } |
2067 | } | 2064 | } |
2068 | 2065 | ||
2069 | if (DSP_SUCCEEDED(status)) { | 2066 | if (!status) { |
2070 | total_size = mmu_fault_dbg_info.size; | 2067 | total_size = mmu_fault_dbg_info.size; |
2071 | /* Limit the size in case DSP went crazy */ | 2068 | /* Limit the size in case DSP went crazy */ |
2072 | if (total_size > MAX_MMU_DBGBUFF) | 2069 | if (total_size > MAX_MMU_DBGBUFF) |
diff --git a/drivers/staging/tidspbridge/core/msg_sm.c b/drivers/staging/tidspbridge/core/msg_sm.c index 7f44294aca29..85ca448d10ba 100644 --- a/drivers/staging/tidspbridge/core/msg_sm.c +++ b/drivers/staging/tidspbridge/core/msg_sm.c | |||
@@ -102,7 +102,7 @@ int bridge_msg_create(struct msg_mgr **msg_man, | |||
102 | else | 102 | else |
103 | sync_init_event(msg_mgr_obj->sync_event); | 103 | sync_init_event(msg_mgr_obj->sync_event); |
104 | 104 | ||
105 | if (DSP_SUCCEEDED(status)) | 105 | if (!status) |
106 | *msg_man = msg_mgr_obj; | 106 | *msg_man = msg_mgr_obj; |
107 | else | 107 | else |
108 | delete_msg_mgr(msg_mgr_obj); | 108 | delete_msg_mgr(msg_mgr_obj); |
@@ -157,7 +157,7 @@ int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr, | |||
157 | 157 | ||
158 | /* Create event that will be signalled when a message from | 158 | /* Create event that will be signalled when a message from |
159 | * the DSP is available. */ | 159 | * the DSP is available. */ |
160 | if (DSP_SUCCEEDED(status)) { | 160 | if (!status) { |
161 | msg_q->sync_event = kzalloc(sizeof(struct sync_object), | 161 | msg_q->sync_event = kzalloc(sizeof(struct sync_object), |
162 | GFP_KERNEL); | 162 | GFP_KERNEL); |
163 | if (msg_q->sync_event) | 163 | if (msg_q->sync_event) |
@@ -167,7 +167,7 @@ int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr, | |||
167 | } | 167 | } |
168 | 168 | ||
169 | /* Create a notification list for message ready notification. */ | 169 | /* Create a notification list for message ready notification. */ |
170 | if (DSP_SUCCEEDED(status)) { | 170 | if (!status) { |
171 | msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object), | 171 | msg_q->ntfy_obj = kmalloc(sizeof(struct ntfy_object), |
172 | GFP_KERNEL); | 172 | GFP_KERNEL); |
173 | if (msg_q->ntfy_obj) | 173 | if (msg_q->ntfy_obj) |
@@ -181,7 +181,7 @@ int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr, | |||
181 | * unblock threads in MSG_Put() or MSG_Get(). sync_done_ack | 181 | * unblock threads in MSG_Put() or MSG_Get(). sync_done_ack |
182 | * will be set by the unblocked thread to signal that it | 182 | * will be set by the unblocked thread to signal that it |
183 | * is unblocked and will no longer reference the object. */ | 183 | * is unblocked and will no longer reference the object. */ |
184 | if (DSP_SUCCEEDED(status)) { | 184 | if (!status) { |
185 | msg_q->sync_done = kzalloc(sizeof(struct sync_object), | 185 | msg_q->sync_done = kzalloc(sizeof(struct sync_object), |
186 | GFP_KERNEL); | 186 | GFP_KERNEL); |
187 | if (msg_q->sync_done) | 187 | if (msg_q->sync_done) |
@@ -190,7 +190,7 @@ int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr, | |||
190 | status = -ENOMEM; | 190 | status = -ENOMEM; |
191 | } | 191 | } |
192 | 192 | ||
193 | if (DSP_SUCCEEDED(status)) { | 193 | if (!status) { |
194 | msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object), | 194 | msg_q->sync_done_ack = kzalloc(sizeof(struct sync_object), |
195 | GFP_KERNEL); | 195 | GFP_KERNEL); |
196 | if (msg_q->sync_done_ack) | 196 | if (msg_q->sync_done_ack) |
@@ -199,13 +199,13 @@ int bridge_msg_create_queue(struct msg_mgr *hmsg_mgr, | |||
199 | status = -ENOMEM; | 199 | status = -ENOMEM; |
200 | } | 200 | } |
201 | 201 | ||
202 | if (DSP_SUCCEEDED(status)) { | 202 | if (!status) { |
203 | /* Enter critical section */ | 203 | /* Enter critical section */ |
204 | spin_lock_bh(&hmsg_mgr->msg_mgr_lock); | 204 | spin_lock_bh(&hmsg_mgr->msg_mgr_lock); |
205 | /* Initialize message frames and put in appropriate queues */ | 205 | /* Initialize message frames and put in appropriate queues */ |
206 | for (i = 0; i < max_msgs && DSP_SUCCEEDED(status); i++) { | 206 | for (i = 0; i < max_msgs && !status; i++) { |
207 | status = add_new_msg(hmsg_mgr->msg_free_list); | 207 | status = add_new_msg(hmsg_mgr->msg_free_list); |
208 | if (DSP_SUCCEEDED(status)) { | 208 | if (!status) { |
209 | num_allocated++; | 209 | num_allocated++; |
210 | status = add_new_msg(msg_q->msg_free_list); | 210 | status = add_new_msg(msg_q->msg_free_list); |
211 | } | 211 | } |
@@ -330,7 +330,7 @@ int bridge_msg_get(struct msg_queue *msg_queue_obj, | |||
330 | } | 330 | } |
331 | /* Exit critical section */ | 331 | /* Exit critical section */ |
332 | spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); | 332 | spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); |
333 | if (DSP_SUCCEEDED(status) && !got_msg) { | 333 | if (!status && !got_msg) { |
334 | /* Wait til message is available, timeout, or done. We don't | 334 | /* Wait til message is available, timeout, or done. We don't |
335 | * have to schedule the DPC, since the DSP will send messages | 335 | * have to schedule the DPC, since the DSP will send messages |
336 | * when they are available. */ | 336 | * when they are available. */ |
@@ -349,7 +349,7 @@ int bridge_msg_get(struct msg_queue *msg_queue_obj, | |||
349 | (void)sync_set_event(msg_queue_obj->sync_done_ack); | 349 | (void)sync_set_event(msg_queue_obj->sync_done_ack); |
350 | status = -EPERM; | 350 | status = -EPERM; |
351 | } else { | 351 | } else { |
352 | if (DSP_SUCCEEDED(status)) { | 352 | if (!status) { |
353 | DBC_ASSERT(!LST_IS_EMPTY | 353 | DBC_ASSERT(!LST_IS_EMPTY |
354 | (msg_queue_obj->msg_used_list)); | 354 | (msg_queue_obj->msg_used_list)); |
355 | /* Get msg from used list */ | 355 | /* Get msg from used list */ |
@@ -432,7 +432,7 @@ int bridge_msg_put(struct msg_queue *msg_queue_obj, | |||
432 | 432 | ||
433 | spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); | 433 | spin_unlock_bh(&hmsg_mgr->msg_mgr_lock); |
434 | } | 434 | } |
435 | if (DSP_SUCCEEDED(status) && !put_msg) { | 435 | if (!status && !put_msg) { |
436 | /* Wait til a free message frame is available, timeout, | 436 | /* Wait til a free message frame is available, timeout, |
437 | * or done */ | 437 | * or done */ |
438 | syncs[0] = hmsg_mgr->sync_event; | 438 | syncs[0] = hmsg_mgr->sync_event; |
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c index ae1f394904e2..8f25a05ea8ce 100644 --- a/drivers/staging/tidspbridge/core/tiomap3430.c +++ b/drivers/staging/tidspbridge/core/tiomap3430.c | |||
@@ -262,7 +262,6 @@ void bridge_drv_entry(struct bridge_drv_interface **drv_intf, | |||
262 | */ | 262 | */ |
263 | static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt) | 263 | static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt) |
264 | { | 264 | { |
265 | int status = 0; | ||
266 | struct bridge_dev_context *dev_context = dev_ctxt; | 265 | struct bridge_dev_context *dev_context = dev_ctxt; |
267 | u32 temp; | 266 | u32 temp; |
268 | struct dspbridge_platform_data *pdata = | 267 | struct dspbridge_platform_data *pdata = |
@@ -291,11 +290,10 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt) | |||
291 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); | 290 | OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL); |
292 | dsp_clk_enable(DSP_CLK_IVA2); | 291 | dsp_clk_enable(DSP_CLK_IVA2); |
293 | 292 | ||
294 | if (DSP_SUCCEEDED(status)) { | 293 | /* set the device state to IDLE */ |
295 | /* set the device state to IDLE */ | 294 | dev_context->dw_brd_state = BRD_IDLE; |
296 | dev_context->dw_brd_state = BRD_IDLE; | 295 | |
297 | } | 296 | return 0; |
298 | return status; | ||
299 | } | 297 | } |
300 | 298 | ||
301 | /* | 299 | /* |
@@ -406,13 +404,13 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, | |||
406 | } else | 404 | } else |
407 | __raw_writel(0xffffffff, dw_sync_addr); | 405 | __raw_writel(0xffffffff, dw_sync_addr); |
408 | 406 | ||
409 | if (DSP_SUCCEEDED(status)) { | 407 | if (!status) { |
410 | resources = dev_context->resources; | 408 | resources = dev_context->resources; |
411 | if (!resources) | 409 | if (!resources) |
412 | status = -EPERM; | 410 | status = -EPERM; |
413 | 411 | ||
414 | /* Assert RST1 i.e only the RST only for DSP megacell */ | 412 | /* Assert RST1 i.e only the RST only for DSP megacell */ |
415 | if (DSP_SUCCEEDED(status)) { | 413 | if (!status) { |
416 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, | 414 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, |
417 | OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, | 415 | OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, |
418 | OMAP2_RM_RSTCTRL); | 416 | OMAP2_RM_RSTCTRL); |
@@ -428,7 +426,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, | |||
428 | OMAP343X_CONTROL_IVA2_BOOTMOD)); | 426 | OMAP343X_CONTROL_IVA2_BOOTMOD)); |
429 | } | 427 | } |
430 | } | 428 | } |
431 | if (DSP_SUCCEEDED(status)) { | 429 | if (!status) { |
432 | /* Reset and Unreset the RST2, so that BOOTADDR is copied to | 430 | /* Reset and Unreset the RST2, so that BOOTADDR is copied to |
433 | * IVA2 SYSC register */ | 431 | * IVA2 SYSC register */ |
434 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, | 432 | (*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, |
@@ -476,7 +474,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, | |||
476 | 474 | ||
477 | /* Lock the above TLB entries and get the BIOS and load monitor timer | 475 | /* Lock the above TLB entries and get the BIOS and load monitor timer |
478 | * information */ | 476 | * information */ |
479 | if (DSP_SUCCEEDED(status)) { | 477 | if (!status) { |
480 | hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx); | 478 | hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx); |
481 | hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx); | 479 | hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx); |
482 | hw_mmu_ttb_set(resources->dw_dmmu_base, | 480 | hw_mmu_ttb_set(resources->dw_dmmu_base, |
@@ -499,7 +497,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, | |||
499 | &ul_load_monitor_timer); | 497 | &ul_load_monitor_timer); |
500 | } | 498 | } |
501 | 499 | ||
502 | if (DSP_SUCCEEDED(status)) { | 500 | if (!status) { |
503 | if (ul_load_monitor_timer != 0xFFFF) { | 501 | if (ul_load_monitor_timer != 0xFFFF) { |
504 | clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | | 502 | clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | |
505 | ul_load_monitor_timer; | 503 | ul_load_monitor_timer; |
@@ -510,7 +508,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, | |||
510 | } | 508 | } |
511 | } | 509 | } |
512 | 510 | ||
513 | if (DSP_SUCCEEDED(status)) { | 511 | if (!status) { |
514 | if (ul_bios_gp_timer != 0xFFFF) { | 512 | if (ul_bios_gp_timer != 0xFFFF) { |
515 | clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | | 513 | clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) | |
516 | ul_bios_gp_timer; | 514 | ul_bios_gp_timer; |
@@ -521,7 +519,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, | |||
521 | } | 519 | } |
522 | } | 520 | } |
523 | 521 | ||
524 | if (DSP_SUCCEEDED(status)) { | 522 | if (!status) { |
525 | /* Set the DSP clock rate */ | 523 | /* Set the DSP clock rate */ |
526 | (void)dev_get_symbol(dev_context->hdev_obj, | 524 | (void)dev_get_symbol(dev_context->hdev_obj, |
527 | "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr); | 525 | "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr); |
@@ -551,7 +549,7 @@ static int bridge_brd_start(struct bridge_dev_context *dev_ctxt, | |||
551 | } | 549 | } |
552 | 550 | ||
553 | } | 551 | } |
554 | if (DSP_SUCCEEDED(status)) { | 552 | if (!status) { |
555 | dev_context->mbox->rxq->callback = (int (*)(void *))io_mbox_msg; | 553 | dev_context->mbox->rxq->callback = (int (*)(void *))io_mbox_msg; |
556 | 554 | ||
557 | /*PM_IVA2GRPSEL_PER = 0xC0;*/ | 555 | /*PM_IVA2GRPSEL_PER = 0xC0;*/ |
@@ -908,7 +906,7 @@ static int bridge_dev_create(struct bridge_dev_context | |||
908 | else | 906 | else |
909 | status = -ENOMEM; | 907 | status = -ENOMEM; |
910 | 908 | ||
911 | if (DSP_SUCCEEDED(status)) { | 909 | if (!status) { |
912 | spin_lock_init(&pt_attrs->pg_lock); | 910 | spin_lock_init(&pt_attrs->pg_lock); |
913 | dev_context->tc_word_swap_on = drv_datap->tc_wordswapon; | 911 | dev_context->tc_word_swap_on = drv_datap->tc_wordswapon; |
914 | 912 | ||
@@ -918,7 +916,7 @@ static int bridge_dev_create(struct bridge_dev_context | |||
918 | * resources struct */ | 916 | * resources struct */ |
919 | dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base; | 917 | dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base; |
920 | } | 918 | } |
921 | if (DSP_SUCCEEDED(status)) { | 919 | if (!status) { |
922 | dev_context->hdev_obj = hdev_obj; | 920 | dev_context->hdev_obj = hdev_obj; |
923 | /* Store current board state. */ | 921 | /* Store current board state. */ |
924 | dev_context->dw_brd_state = BRD_STOPPED; | 922 | dev_context->dw_brd_state = BRD_STOPPED; |
@@ -1111,13 +1109,13 @@ static int bridge_brd_mem_copy(struct bridge_dev_context *dev_ctxt, | |||
1111 | u32 total_bytes = ul_num_bytes; | 1109 | u32 total_bytes = ul_num_bytes; |
1112 | u8 host_buf[BUFFERSIZE]; | 1110 | u8 host_buf[BUFFERSIZE]; |
1113 | struct bridge_dev_context *dev_context = dev_ctxt; | 1111 | struct bridge_dev_context *dev_context = dev_ctxt; |
1114 | while ((total_bytes > 0) && DSP_SUCCEEDED(status)) { | 1112 | while (total_bytes > 0 && !status) { |
1115 | copy_bytes = | 1113 | copy_bytes = |
1116 | total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes; | 1114 | total_bytes > BUFFERSIZE ? BUFFERSIZE : total_bytes; |
1117 | /* Read from External memory */ | 1115 | /* Read from External memory */ |
1118 | status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr, | 1116 | status = read_ext_dsp_data(dev_ctxt, host_buf, src_addr, |
1119 | copy_bytes, mem_type); | 1117 | copy_bytes, mem_type); |
1120 | if (DSP_SUCCEEDED(status)) { | 1118 | if (!status) { |
1121 | if (dest_addr < (dev_context->dw_dsp_start_add + | 1119 | if (dest_addr < (dev_context->dw_dsp_start_add + |
1122 | dev_context->dw_internal_size)) { | 1120 | dev_context->dw_internal_size)) { |
1123 | /* Write to Internal memory */ | 1121 | /* Write to Internal memory */ |
@@ -1149,7 +1147,7 @@ static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt, | |||
1149 | u32 ul_remain_bytes = 0; | 1147 | u32 ul_remain_bytes = 0; |
1150 | u32 ul_bytes = 0; | 1148 | u32 ul_bytes = 0; |
1151 | ul_remain_bytes = ul_num_bytes; | 1149 | ul_remain_bytes = ul_num_bytes; |
1152 | while (ul_remain_bytes > 0 && DSP_SUCCEEDED(status)) { | 1150 | while (ul_remain_bytes > 0 && !status) { |
1153 | ul_bytes = | 1151 | ul_bytes = |
1154 | ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes; | 1152 | ul_remain_bytes > BUFFERSIZE ? BUFFERSIZE : ul_remain_bytes; |
1155 | if (dsp_addr < (dev_context->dw_dsp_start_add + | 1153 | if (dsp_addr < (dev_context->dw_dsp_start_add + |
@@ -1369,9 +1367,7 @@ static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt, | |||
1369 | } | 1367 | } |
1370 | up_read(&mm->mmap_sem); | 1368 | up_read(&mm->mmap_sem); |
1371 | func_cont: | 1369 | func_cont: |
1372 | if (DSP_SUCCEEDED(status)) { | 1370 | if (status) { |
1373 | status = 0; | ||
1374 | } else { | ||
1375 | /* | 1371 | /* |
1376 | * Roll out the mapped pages in case it failed in middle of | 1372 | * mapping |
1377 | * mapping | 1373 | * mapping |
@@ -1433,7 +1429,7 @@ static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt, | |||
1433 | "pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr, | 1429 | "pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr, |
1434 | ul_num_bytes, l1_base_va, pte_addr_l1); | 1430 | ul_num_bytes, l1_base_va, pte_addr_l1); |
1435 | 1431 | ||
1436 | while (rem_bytes && (DSP_SUCCEEDED(status))) { | 1432 | while (rem_bytes && !status) { |
1437 | u32 va_curr_orig = va_curr; | 1433 | u32 va_curr_orig = va_curr; |
1438 | /* Find whether the L1 PTE points to a valid L2 PT */ | 1434 | /* Find whether the L1 PTE points to a valid L2 PT */ |
1439 | pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr); | 1435 | pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr); |
@@ -1472,7 +1468,7 @@ static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt, | |||
1472 | * entry. Similar checking is done for L1 PTEs too | 1468 | * entry. Similar checking is done for L1 PTEs too |
1473 | * below | 1469 | * below |
1474 | */ | 1470 | */ |
1475 | while (rem_bytes_l2 && (DSP_SUCCEEDED(status))) { | 1471 | while (rem_bytes_l2 && !status) { |
1476 | pte_val = *(u32 *) pte_addr_l2; | 1472 | pte_val = *(u32 *) pte_addr_l2; |
1477 | pte_size = hw_mmu_pte_size_l2(pte_val); | 1473 | pte_size = hw_mmu_pte_size_l2(pte_val); |
1478 | /* va_curr aligned to pte_size? */ | 1474 | /* va_curr aligned to pte_size? */ |
@@ -1639,7 +1635,7 @@ static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa, | |||
1639 | HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB | 1635 | HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB |
1640 | }; | 1636 | }; |
1641 | 1637 | ||
1642 | while (num_bytes && DSP_SUCCEEDED(status)) { | 1638 | while (num_bytes && !status) { |
1643 | /* To find the max. page size with which both PA & VA are | 1639 | /* To find the max. page size with which both PA & VA are |
1644 | * aligned */ | 1640 | * aligned */ |
1645 | all_bits = pa_curr | va_curr; | 1641 | all_bits = pa_curr | va_curr; |
@@ -1736,7 +1732,7 @@ static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va, | |||
1736 | * Should not overwrite it. */ | 1732 | * Should not overwrite it. */ |
1737 | status = -EPERM; | 1733 | status = -EPERM; |
1738 | } | 1734 | } |
1739 | if (DSP_SUCCEEDED(status)) { | 1735 | if (!status) { |
1740 | pg_tbl_va = l2_base_va; | 1736 | pg_tbl_va = l2_base_va; |
1741 | if (size == HW_PAGE_SIZE64KB) | 1737 | if (size == HW_PAGE_SIZE64KB) |
1742 | pt->pg_info[l2_page_num].num_entries += 16; | 1738 | pt->pg_info[l2_page_num].num_entries += 16; |
@@ -1749,7 +1745,7 @@ static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va, | |||
1749 | } | 1745 | } |
1750 | spin_unlock(&pt->pg_lock); | 1746 | spin_unlock(&pt->pg_lock); |
1751 | } | 1747 | } |
1752 | if (DSP_SUCCEEDED(status)) { | 1748 | if (!status) { |
1753 | dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n", | 1749 | dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n", |
1754 | pg_tbl_va, pa, va, size); | 1750 | pg_tbl_va, pa, va, size); |
1755 | dev_dbg(bridge, "PTE: endianism %x, element_size %x, " | 1751 | dev_dbg(bridge, "PTE: endianism %x, element_size %x, " |
@@ -1789,7 +1785,7 @@ static int mem_map_vmalloc(struct bridge_dev_context *dev_context, | |||
1789 | va_curr = ul_mpu_addr; | 1785 | va_curr = ul_mpu_addr; |
1790 | page[0] = vmalloc_to_page((void *)va_curr); | 1786 | page[0] = vmalloc_to_page((void *)va_curr); |
1791 | pa_next = page_to_phys(page[0]); | 1787 | pa_next = page_to_phys(page[0]); |
1792 | while (DSP_SUCCEEDED(status) && (i < num_pages)) { | 1788 | while (!status && (i < num_pages)) { |
1793 | /* | 1789 | /* |
1794 | * Reuse pa_next from the previous iteration to avoid | 1790 | * an extra va2pa call |
1795 | * an extra va2pa call | 1791 | * an extra va2pa call |
@@ -1827,11 +1823,6 @@ static int mem_map_vmalloc(struct bridge_dev_context *dev_context, | |||
1827 | hw_attrs); | 1823 | hw_attrs); |
1828 | va_curr += size_curr; | 1824 | va_curr += size_curr; |
1829 | } | 1825 | } |
1830 | if (DSP_SUCCEEDED(status)) | ||
1831 | status = 0; | ||
1832 | else | ||
1833 | status = -EPERM; | ||
1834 | |||
1835 | /* | 1826 | /* |
1836 | * In any case, flush the TLB | 1827 | * In any case, flush the TLB |
1837 | * This is called from here instead from pte_update to avoid unnecessary | 1828 | * This is called from here instead from pte_update to avoid unnecessary |
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c index dc63b3ae837c..d9386450d4f7 100644 --- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c +++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c | |||
@@ -112,7 +112,7 @@ int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context) | |||
112 | /* Disable wdt on hibernation. */ | 112 | /* Disable wdt on hibernation. */ |
113 | dsp_wdt_enable(false); | 113 | dsp_wdt_enable(false); |
114 | 114 | ||
115 | if (DSP_SUCCEEDED(status)) { | 115 | if (!status) { |
116 | /* Update the Bridge driver state */ | 116 | /* Update the Bridge driver state */ |
117 | dev_context->dw_brd_state = BRD_DSP_HIBERNATION; | 117 | dev_context->dw_brd_state = BRD_DSP_HIBERNATION; |
118 | #ifdef CONFIG_TIDSPBRIDGE_DVFS | 118 | #ifdef CONFIG_TIDSPBRIDGE_DVFS |
@@ -310,7 +310,7 @@ int dsp_peripheral_clk_ctrl(struct bridge_dev_context *dev_context, | |||
310 | status = dsp_clk_disable(bpwr_clks[clk_id_index].clk); | 310 | status = dsp_clk_disable(bpwr_clks[clk_id_index].clk); |
311 | dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id, | 311 | dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id, |
312 | false); | 312 | false); |
313 | if (DSP_SUCCEEDED(status)) { | 313 | if (!status) { |
314 | (dev_context->dsp_per_clks) &= | 314 | (dev_context->dsp_per_clks) &= |
315 | (~((u32) (1 << bpwr_clks[clk_id_index].clk))); | 315 | (~((u32) (1 << bpwr_clks[clk_id_index].clk))); |
316 | } | 316 | } |
@@ -318,7 +318,7 @@ int dsp_peripheral_clk_ctrl(struct bridge_dev_context *dev_context, | |||
318 | case BPWR_ENABLE_CLOCK: | 318 | case BPWR_ENABLE_CLOCK: |
319 | status = dsp_clk_enable(bpwr_clks[clk_id_index].clk); | 319 | status = dsp_clk_enable(bpwr_clks[clk_id_index].clk); |
320 | dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id, true); | 320 | dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id, true); |
321 | if (DSP_SUCCEEDED(status)) | 321 | if (!status) |
322 | (dev_context->dsp_per_clks) |= | 322 | (dev_context->dsp_per_clks) |= |
323 | (1 << bpwr_clks[clk_id_index].clk); | 323 | (1 << bpwr_clks[clk_id_index].clk); |
324 | break; | 324 | break; |
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c index 742da05af0c6..190c028afe9b 100644 --- a/drivers/staging/tidspbridge/core/tiomap_io.c +++ b/drivers/staging/tidspbridge/core/tiomap_io.c | |||
@@ -70,19 +70,19 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt, | |||
70 | DBC_ASSERT(ul_shm_base_virt != 0); | 70 | DBC_ASSERT(ul_shm_base_virt != 0); |
71 | 71 | ||
72 | /* Check if it is a read of Trace section */ | 72 | /* Check if it is a read of Trace section */ |
73 | if (DSP_SUCCEEDED(status) && !ul_trace_sec_beg) { | 73 | if (!status && !ul_trace_sec_beg) { |
74 | status = dev_get_symbol(dev_context->hdev_obj, | 74 | status = dev_get_symbol(dev_context->hdev_obj, |
75 | DSP_TRACESEC_BEG, &ul_trace_sec_beg); | 75 | DSP_TRACESEC_BEG, &ul_trace_sec_beg); |
76 | } | 76 | } |
77 | DBC_ASSERT(ul_trace_sec_beg != 0); | 77 | DBC_ASSERT(ul_trace_sec_beg != 0); |
78 | 78 | ||
79 | if (DSP_SUCCEEDED(status) && !ul_trace_sec_end) { | 79 | if (!status && !ul_trace_sec_end) { |
80 | status = dev_get_symbol(dev_context->hdev_obj, | 80 | status = dev_get_symbol(dev_context->hdev_obj, |
81 | DSP_TRACESEC_END, &ul_trace_sec_end); | 81 | DSP_TRACESEC_END, &ul_trace_sec_end); |
82 | } | 82 | } |
83 | DBC_ASSERT(ul_trace_sec_end != 0); | 83 | DBC_ASSERT(ul_trace_sec_end != 0); |
84 | 84 | ||
85 | if (DSP_SUCCEEDED(status)) { | 85 | if (!status) { |
86 | if ((dsp_addr <= ul_trace_sec_end) && | 86 | if ((dsp_addr <= ul_trace_sec_end) && |
87 | (dsp_addr >= ul_trace_sec_beg)) | 87 | (dsp_addr >= ul_trace_sec_beg)) |
88 | trace_read = true; | 88 | trace_read = true; |
@@ -100,19 +100,19 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt, | |||
100 | ul_ext_end = 0; | 100 | ul_ext_end = 0; |
101 | 101 | ||
102 | /* Get DYNEXT_BEG, EXT_BEG and EXT_END. */ | 102 | /* Get DYNEXT_BEG, EXT_BEG and EXT_END. */ |
103 | if (DSP_SUCCEEDED(status) && !ul_dyn_ext_base) { | 103 | if (!status && !ul_dyn_ext_base) { |
104 | status = dev_get_symbol(dev_context->hdev_obj, | 104 | status = dev_get_symbol(dev_context->hdev_obj, |
105 | DYNEXTBASE, &ul_dyn_ext_base); | 105 | DYNEXTBASE, &ul_dyn_ext_base); |
106 | } | 106 | } |
107 | DBC_ASSERT(ul_dyn_ext_base != 0); | 107 | DBC_ASSERT(ul_dyn_ext_base != 0); |
108 | 108 | ||
109 | if (DSP_SUCCEEDED(status)) { | 109 | if (!status) { |
110 | status = dev_get_symbol(dev_context->hdev_obj, | 110 | status = dev_get_symbol(dev_context->hdev_obj, |
111 | EXTBASE, &ul_ext_base); | 111 | EXTBASE, &ul_ext_base); |
112 | } | 112 | } |
113 | DBC_ASSERT(ul_ext_base != 0); | 113 | DBC_ASSERT(ul_ext_base != 0); |
114 | 114 | ||
115 | if (DSP_SUCCEEDED(status)) { | 115 | if (!status) { |
116 | status = dev_get_symbol(dev_context->hdev_obj, | 116 | status = dev_get_symbol(dev_context->hdev_obj, |
117 | EXTEND, &ul_ext_end); | 117 | EXTEND, &ul_ext_end); |
118 | } | 118 | } |
@@ -131,7 +131,7 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt, | |||
131 | if (ul_ext_end < ul_ext_base) | 131 | if (ul_ext_end < ul_ext_base) |
132 | status = -EPERM; | 132 | status = -EPERM; |
133 | 133 | ||
134 | if (DSP_SUCCEEDED(status)) { | 134 | if (!status) { |
135 | ul_tlb_base_virt = | 135 | ul_tlb_base_virt = |
136 | dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE; | 136 | dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE; |
137 | DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); | 137 | DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); |
@@ -167,7 +167,7 @@ int read_ext_dsp_data(struct bridge_dev_context *dev_ctxt, | |||
167 | 167 | ||
168 | offset = dsp_addr - ul_ext_base; | 168 | offset = dsp_addr - ul_ext_base; |
169 | 169 | ||
170 | if (DSP_SUCCEEDED(status)) | 170 | if (!status) |
171 | memcpy(host_buff, (u8 *) dw_base_addr + offset, ul_num_bytes); | 171 | memcpy(host_buff, (u8 *) dw_base_addr + offset, ul_num_bytes); |
172 | 172 | ||
173 | return status; | 173 | return status; |
@@ -247,12 +247,12 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context, | |||
247 | /* Check if it is a load to Trace section */ | 247 | /* Check if it is a load to Trace section */ |
248 | ret = dev_get_symbol(dev_context->hdev_obj, | 248 | ret = dev_get_symbol(dev_context->hdev_obj, |
249 | DSP_TRACESEC_BEG, &ul_trace_sec_beg); | 249 | DSP_TRACESEC_BEG, &ul_trace_sec_beg); |
250 | if (DSP_SUCCEEDED(ret)) | 250 | if (!ret) |
251 | ret = dev_get_symbol(dev_context->hdev_obj, | 251 | ret = dev_get_symbol(dev_context->hdev_obj, |
252 | DSP_TRACESEC_END, | 252 | DSP_TRACESEC_END, |
253 | &ul_trace_sec_end); | 253 | &ul_trace_sec_end); |
254 | } | 254 | } |
255 | if (DSP_SUCCEEDED(ret)) { | 255 | if (!ret) { |
256 | if ((dsp_addr <= ul_trace_sec_end) && | 256 | if ((dsp_addr <= ul_trace_sec_end) && |
257 | (dsp_addr >= ul_trace_sec_beg)) | 257 | (dsp_addr >= ul_trace_sec_beg)) |
258 | trace_load = true; | 258 | trace_load = true; |
@@ -272,7 +272,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context, | |||
272 | SHMBASENAME, &ul_shm_base_virt); | 272 | SHMBASENAME, &ul_shm_base_virt); |
273 | DBC_ASSERT(ul_shm_base_virt != 0); | 273 | DBC_ASSERT(ul_shm_base_virt != 0); |
274 | if (dynamic_load) { | 274 | if (dynamic_load) { |
275 | if (DSP_SUCCEEDED(ret)) { | 275 | if (!ret) { |
276 | if (symbols_reloaded) | 276 | if (symbols_reloaded) |
277 | ret = | 277 | ret = |
278 | dev_get_symbol | 278 | dev_get_symbol |
@@ -280,7 +280,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context, | |||
280 | &ul_ext_base); | 280 | &ul_ext_base); |
281 | } | 281 | } |
282 | DBC_ASSERT(ul_ext_base != 0); | 282 | DBC_ASSERT(ul_ext_base != 0); |
283 | if (DSP_SUCCEEDED(ret)) { | 283 | if (!ret) { |
284 | /* DR OMAPS00013235 : DLModules array may be | 284 | /* DR OMAPS00013235 : DLModules array may be |
285 | * in EXTMEM. It is expected that DYNEXTMEM and | 285 | * in EXTMEM. It is expected that DYNEXTMEM and |
286 | * EXTMEM are contiguous, so checking for the | 286 | * EXTMEM are contiguous, so checking for the |
@@ -293,13 +293,13 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context, | |||
293 | } | 293 | } |
294 | } else { | 294 | } else { |
295 | if (symbols_reloaded) { | 295 | if (symbols_reloaded) { |
296 | if (DSP_SUCCEEDED(ret)) | 296 | if (!ret) |
297 | ret = | 297 | ret = |
298 | dev_get_symbol | 298 | dev_get_symbol |
299 | (dev_context->hdev_obj, EXTBASE, | 299 | (dev_context->hdev_obj, EXTBASE, |
300 | &ul_ext_base); | 300 | &ul_ext_base); |
301 | DBC_ASSERT(ul_ext_base != 0); | 301 | DBC_ASSERT(ul_ext_base != 0); |
302 | if (DSP_SUCCEEDED(ret)) | 302 | if (!ret) |
303 | ret = | 303 | ret = |
304 | dev_get_symbol | 304 | dev_get_symbol |
305 | (dev_context->hdev_obj, EXTEND, | 305 | (dev_context->hdev_obj, EXTEND, |
@@ -316,19 +316,16 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context, | |||
316 | if (ul_ext_end < ul_ext_base) | 316 | if (ul_ext_end < ul_ext_base) |
317 | ret = -EPERM; | 317 | ret = -EPERM; |
318 | 318 | ||
319 | if (DSP_SUCCEEDED(ret)) { | 319 | if (!ret) { |
320 | ul_tlb_base_virt = | 320 | ul_tlb_base_virt = |
321 | dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE; | 321 | dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE; |
322 | DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); | 322 | DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt); |
323 | 323 | ||
324 | if (symbols_reloaded) { | 324 | if (symbols_reloaded) { |
325 | if (DSP_SUCCEEDED(ret)) { | 325 | ret = dev_get_symbol |
326 | ret = | ||
327 | dev_get_symbol | ||
328 | (dev_context->hdev_obj, | 326 | (dev_context->hdev_obj, |
329 | DSP_TRACESEC_END, &shm0_end); | 327 | DSP_TRACESEC_END, &shm0_end); |
330 | } | 328 | if (!ret) { |
331 | if (DSP_SUCCEEDED(ret)) { | ||
332 | ret = | 329 | ret = |
333 | dev_get_symbol | 330 | dev_get_symbol |
334 | (dev_context->hdev_obj, DYNEXTBASE, | 331 | (dev_context->hdev_obj, DYNEXTBASE, |
@@ -360,7 +357,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context, | |||
360 | if (!dw_base_addr || !ul_ext_base || !ul_ext_end) | 357 | if (!dw_base_addr || !ul_ext_base || !ul_ext_end) |
361 | ret = -EPERM; | 358 | ret = -EPERM; |
362 | 359 | ||
363 | if (DSP_SUCCEEDED(ret)) { | 360 | if (!ret) { |
364 | for (i = 0; i < 4; i++) | 361 | for (i = 0; i < 4; i++) |
365 | remain_byte[i] = 0x0; | 362 | remain_byte[i] = 0x0; |
366 | 363 | ||
@@ -369,7 +366,7 @@ int write_ext_dsp_data(struct bridge_dev_context *dev_context, | |||
369 | if (dsp_addr > ul_ext_end || dw_offset > dsp_addr) | 366 | if (dsp_addr > ul_ext_end || dw_offset > dsp_addr) |
370 | ret = -EPERM; | 367 | ret = -EPERM; |
371 | } | 368 | } |
372 | if (DSP_SUCCEEDED(ret)) { | 369 | if (!ret) { |
373 | if (ul_num_bytes) | 370 | if (ul_num_bytes) |
374 | memcpy((u8 *) dw_base_addr + dw_offset, host_buff, | 371 | memcpy((u8 *) dw_base_addr + dw_offset, host_buff, |
375 | ul_num_bytes); | 372 | ul_num_bytes); |