Diffstat (limited to 'drivers')
-rw-r--r--  drivers/staging/tidspbridge/hw/EasiGlobal.h            2
-rw-r--r--  drivers/staging/tidspbridge/hw/GlobalTypes.h          72
-rw-r--r--  drivers/staging/tidspbridge/hw/MMURegAcM.h            96
-rw-r--r--  drivers/staging/tidspbridge/hw/hw_mmu.c               46
-rw-r--r--  drivers/staging/tidspbridge/hw/hw_mmu.h               20
-rw-r--r--  drivers/staging/tidspbridge/include/dspbridge/cmm.h    6
-rw-r--r--  drivers/staging/tidspbridge/pmgr/cmm.c                14
7 files changed, 129 insertions(+), 127 deletions(-)
diff --git a/drivers/staging/tidspbridge/hw/EasiGlobal.h b/drivers/staging/tidspbridge/hw/EasiGlobal.h
index 9b45aa7a117..e48d7f67c60 100644
--- a/drivers/staging/tidspbridge/hw/EasiGlobal.h
+++ b/drivers/staging/tidspbridge/hw/EasiGlobal.h
@@ -36,6 +36,6 @@
  *
  * NOTE: We currently dont use this functionality.
  */
-#define _DEBUG_LEVEL1_EASI(easiNum) ((void)0)
+#define _DEBUG_LEVEL1_EASI(easi_num) ((void)0)
 
 #endif /* _EASIGLOBAL_H */
diff --git a/drivers/staging/tidspbridge/hw/GlobalTypes.h b/drivers/staging/tidspbridge/hw/GlobalTypes.h
index 9b5515038ec..95fc8ca5036 100644
--- a/drivers/staging/tidspbridge/hw/GlobalTypes.h
+++ b/drivers/staging/tidspbridge/hw/GlobalTypes.h
@@ -94,39 +94,39 @@
 #define LOWER8BIT_MASK 0x000000FF
 
 /*
- * Definition: RETURN32BITS_FROM16LOWER_AND16UPPER(lower16Bits, upper16Bits)
+ * Definition: RETURN32BITS_FROM16LOWER_AND16UPPER(lower16_bits, upper16_bits)
  *
  * DESCRIPTION: Returns a 32 bit value given a 16 bit lower value and a 16
  * bit upper value
  */
-#define RETURN32BITS_FROM16LOWER_AND16UPPER(lower16Bits, upper16Bits)\
-        (((((u32)lower16Bits) & LOWER16BIT_MASK)) | \
-        (((((u32)upper16Bits) & LOWER16BIT_MASK) << UPPER16BIT_SHIFT)))
+#define RETURN32BITS_FROM16LOWER_AND16UPPER(lower16_bits, upper16_bits)\
+        (((((u32)lower16_bits) & LOWER16BIT_MASK)) | \
+        (((((u32)upper16_bits) & LOWER16BIT_MASK) << UPPER16BIT_SHIFT)))
 
 /*
- * Definition: RETURN16BITS_FROM8LOWER_AND8UPPER(lower16Bits, upper16Bits)
+ * Definition: RETURN16BITS_FROM8LOWER_AND8UPPER(lower16_bits, upper16_bits)
  *
  * DESCRIPTION: Returns a 16 bit value given a 8 bit lower value and a 8
  * bit upper value
  */
-#define RETURN16BITS_FROM8LOWER_AND8UPPER(lower8Bits, upper8Bits)\
-        (((((u32)lower8Bits) & LOWER8BIT_MASK)) | \
-        (((((u32)upper8Bits) & LOWER8BIT_MASK) << UPPER8BIT_OF16_SHIFT)))
+#define RETURN16BITS_FROM8LOWER_AND8UPPER(lower8_bits, upper8_bits)\
+        (((((u32)lower8_bits) & LOWER8BIT_MASK)) | \
+        (((((u32)upper8_bits) & LOWER8BIT_MASK) << UPPER8BIT_OF16_SHIFT)))
 
 /*
- * Definition: RETURN32BITS_FROM48BIT_VALUES(lower8Bits, lowerMiddle8Bits,
- *                                           lowerUpper8Bits, upper8Bits)
+ * Definition: RETURN32BITS_FROM48BIT_VALUES(lower8_bits, lower_middle8_bits,
+ *                                           lower_upper8_bits, upper8_bits)
  *
  * DESCRIPTION: Returns a 32 bit value given four 8 bit values
  */
-#define RETURN32BITS_FROM48BIT_VALUES(lower8Bits, lowerMiddle8Bits,\
-        lowerUpper8Bits, upper8Bits)\
-        (((((u32)lower8Bits) & LOWER8BIT_MASK)) | \
-        (((((u32)lowerMiddle8Bits) & LOWER8BIT_MASK) <<\
+#define RETURN32BITS_FROM48BIT_VALUES(lower8_bits, lower_middle8_bits,\
+        lower_upper8_bits, upper8_bits)\
+        (((((u32)lower8_bits) & LOWER8BIT_MASK)) | \
+        (((((u32)lower_middle8_bits) & LOWER8BIT_MASK) <<\
                 LOWER_MIDDLE8BIT_SHIFT)) | \
-        (((((u32)lowerUpper8Bits) & LOWER8BIT_MASK) <<\
+        (((((u32)lower_upper8_bits) & LOWER8BIT_MASK) <<\
                 UPPER_MIDDLE8BIT_SHIFT)) | \
-        (((((u32)upper8Bits) & LOWER8BIT_MASK) <<\
+        (((((u32)upper8_bits) & LOWER8BIT_MASK) <<\
                 UPPER8BIT_SHIFT)))
 
 /*
@@ -285,24 +285,26 @@ enum return_code_label {
 
 /* Not sure if this all belongs here */
 
-#define CHECK_RETURN_VALUE(actualValue, expectedValue, returnCodeIfMismatch,\
-        spyCodeIfMisMatch)
-#define CHECK_RETURN_VALUE_RET(actualValue, expectedValue, returnCodeIfMismatch)
-#define CHECK_RETURN_VALUE_RES(actualValue, expectedValue, spyCodeIfMisMatch)
-#define CHECK_RETURN_VALUE_RET_VOID(actualValue, expectedValue,\
-        spyCodeIfMisMatch)
+#define CHECK_RETURN_VALUE(actual_value, expected_value,\
+        return_code_if_mismatch, spy_code_if_mis_match)
+#define CHECK_RETURN_VALUE_RET(actual_value, expected_value,\
+        return_code_if_mismatch)
+#define CHECK_RETURN_VALUE_RES(actual_value, expected_value,\
+        spy_code_if_mis_match)
+#define CHECK_RETURN_VALUE_RET_VOID(actual_value, expected_value,\
+        spy_code_if_mis_match)
 
-#define CHECK_INPUT_PARAM(actualValue, invalidValue, returnCodeIfMismatch,\
-        spyCodeIfMisMatch)
-#define CHECK_INPUT_PARAM_NO_SPY(actualValue, invalidValue,\
-        returnCodeIfMismatch)
-#define CHECK_INPUT_RANGE(actualValue, minValidValue, maxValidValue,\
-        returnCodeIfMismatch, spyCodeIfMisMatch)
-#define CHECK_INPUT_RANGE_NO_SPY(actualValue, minValidValue, maxValidValue,\
-        returnCodeIfMismatch)
-#define CHECK_INPUT_RANGE_MIN0(actualValue, maxValidValue,\
-        returnCodeIfMismatch, spyCodeIfMisMatch)
-#define CHECK_INPUT_RANGE_NO_SPY_MIN0(actualValue, maxValidValue,\
-        returnCodeIfMismatch)
+#define CHECK_INPUT_PARAM(actual_value, invalid_value,\
+        return_code_if_mismatch, spy_code_if_mis_match)
+#define CHECK_INPUT_PARAM_NO_SPY(actual_value, invalid_value,\
+        return_code_if_mismatch)
+#define CHECK_INPUT_RANGE(actual_value, min_valid_value, max_valid_value,\
+        return_code_if_mismatch, spy_code_if_mis_match)
+#define CHECK_INPUT_RANGE_NO_SPY(actual_value, min_valid_value,\
+        max_valid_value, return_code_if_mismatch)
+#define CHECK_INPUT_RANGE_MIN0(actual_value, max_valid_value,\
+        return_code_if_mismatch, spy_code_if_mis_match)
+#define CHECK_INPUT_RANGE_NO_SPY_MIN0(actual_value, max_valid_value,\
+        return_code_if_mismatch)
 
 #endif /* _GLOBALTYPES_H */
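
The GlobalTypes.h hunks above only rename macro parameters; the behaviour of the bit-combining helpers is unchanged. As a quick illustration, the following standalone sketch reproduces RETURN32BITS_FROM16LOWER_AND16UPPER with the renamed parameters. Only LOWER8BIT_MASK is visible in the hunk, so the LOWER16BIT_MASK and UPPER16BIT_SHIFT values below are assumptions based on their names, not values taken from the header.

/*
 * Standalone sketch of RETURN32BITS_FROM16LOWER_AND16UPPER after the
 * rename. LOWER16BIT_MASK and UPPER16BIT_SHIFT are assumed values
 * (only LOWER8BIT_MASK appears in the hunk above).
 */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

#define LOWER16BIT_MASK   0x0000FFFF   /* assumed */
#define UPPER16BIT_SHIFT  16           /* assumed */

#define RETURN32BITS_FROM16LOWER_AND16UPPER(lower16_bits, upper16_bits)\
        (((((u32)lower16_bits) & LOWER16BIT_MASK)) | \
        (((((u32)upper16_bits) & LOWER16BIT_MASK) << UPPER16BIT_SHIFT)))

int main(void)
{
        /* 0xBEEF in the low half, 0xDEAD in the high half -> 0xDEADBEEF */
        printf("0x%08X\n",
               RETURN32BITS_FROM16LOWER_AND16UPPER(0xBEEF, 0xDEAD));
        return 0;
}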
diff --git a/drivers/staging/tidspbridge/hw/MMURegAcM.h b/drivers/staging/tidspbridge/hw/MMURegAcM.h
index c341060b480..39db036da42 100644
--- a/drivers/staging/tidspbridge/hw/MMURegAcM.h
+++ b/drivers/staging/tidspbridge/hw/MMURegAcM.h
@@ -33,38 +33,38 @@
 {\
         const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
         register u32 data = __raw_readl((base_address)+offset);\
-        register u32 newValue = (value);\
+        register u32 new_value = (value);\
         _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32);\
         data &= ~(MMU_MMU_SYSCONFIG_IDLE_MODE_MASK);\
-        newValue <<= MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET;\
-        newValue &= MMU_MMU_SYSCONFIG_IDLE_MODE_MASK;\
-        newValue |= data;\
-        __raw_writel(newValue, base_address+offset);\
+        new_value <<= MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET;\
+        new_value &= MMU_MMU_SYSCONFIG_IDLE_MODE_MASK;\
+        new_value |= data;\
+        __raw_writel(new_value, base_address+offset);\
 }
 
 #define MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32(base_address, value)\
 {\
         const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
         register u32 data = __raw_readl((base_address)+offset);\
-        register u32 newValue = (value);\
+        register u32 new_value = (value);\
         _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32);\
         data &= ~(MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK);\
-        newValue <<= MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET;\
-        newValue &= MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK;\
-        newValue |= data;\
-        __raw_writel(newValue, base_address+offset);\
+        new_value <<= MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET;\
+        new_value &= MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK;\
+        new_value |= data;\
+        __raw_writel(new_value, base_address+offset);\
 }
 
 #define MMUMMU_IRQSTATUS_READ_REGISTER32(base_address)\
-        (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUSReadRegister32),\
+        (_DEBUG_LEVEL1_EASI(easil1_mmummu_irqstatus_read_register32),\
         __raw_readl((base_address)+MMU_MMU_IRQSTATUS_OFFSET))
 
 #define MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, value)\
 {\
         const u32 offset = MMU_MMU_IRQSTATUS_OFFSET;\
-        register u32 newValue = (value);\
+        register u32 new_value = (value);\
         _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32);\
-        __raw_writel(newValue, (base_address)+offset);\
+        __raw_writel(new_value, (base_address)+offset);\
 }
 
 #define MMUMMU_IRQENABLE_READ_REGISTER32(base_address)\
@@ -74,9 +74,9 @@
 #define MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, value)\
 {\
         const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\
-        register u32 newValue = (value);\
+        register u32 new_value = (value);\
         _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32);\
-        __raw_writel(newValue, (base_address)+offset);\
+        __raw_writel(new_value, (base_address)+offset);\
 }
 
 #define MMUMMU_WALKING_STTWL_RUNNING_READ32(base_address)\
@@ -95,26 +95,26 @@
 {\
         const u32 offset = MMU_MMU_CNTL_OFFSET;\
         register u32 data = __raw_readl((base_address)+offset);\
-        register u32 newValue = (value);\
+        register u32 new_value = (value);\
         _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32);\
         data &= ~(MMU_MMU_CNTL_TWL_ENABLE_MASK);\
-        newValue <<= MMU_MMU_CNTL_TWL_ENABLE_OFFSET;\
-        newValue &= MMU_MMU_CNTL_TWL_ENABLE_MASK;\
-        newValue |= data;\
-        __raw_writel(newValue, base_address+offset);\
+        new_value <<= MMU_MMU_CNTL_TWL_ENABLE_OFFSET;\
+        new_value &= MMU_MMU_CNTL_TWL_ENABLE_MASK;\
+        new_value |= data;\
+        __raw_writel(new_value, base_address+offset);\
 }
 
 #define MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, value)\
 {\
         const u32 offset = MMU_MMU_CNTL_OFFSET;\
         register u32 data = __raw_readl((base_address)+offset);\
-        register u32 newValue = (value);\
+        register u32 new_value = (value);\
         _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32);\
         data &= ~(MMU_MMU_CNTL_MMU_ENABLE_MASK);\
-        newValue <<= MMU_MMU_CNTL_MMU_ENABLE_OFFSET;\
-        newValue &= MMU_MMU_CNTL_MMU_ENABLE_MASK;\
-        newValue |= data;\
-        __raw_writel(newValue, base_address+offset);\
+        new_value <<= MMU_MMU_CNTL_MMU_ENABLE_OFFSET;\
+        new_value &= MMU_MMU_CNTL_MMU_ENABLE_MASK;\
+        new_value |= data;\
+        __raw_writel(new_value, base_address+offset);\
 }
 
 #define MMUMMU_FAULT_AD_READ_REGISTER32(base_address)\
@@ -124,9 +124,9 @@
 #define MMUMMU_TTB_WRITE_REGISTER32(base_address, value)\
 {\
         const u32 offset = MMU_MMU_TTB_OFFSET;\
-        register u32 newValue = (value);\
+        register u32 new_value = (value);\
         _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_TTB_WRITE_REGISTER32);\
-        __raw_writel(newValue, (base_address)+offset);\
+        __raw_writel(new_value, (base_address)+offset);\
 }
 
 #define MMUMMU_LOCK_READ_REGISTER32(base_address)\
@@ -136,9 +136,9 @@
 #define MMUMMU_LOCK_WRITE_REGISTER32(base_address, value)\
 {\
         const u32 offset = MMU_MMU_LOCK_OFFSET;\
-        register u32 newValue = (value);\
+        register u32 new_value = (value);\
         _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_WRITE_REGISTER32);\
-        __raw_writel(newValue, (base_address)+offset);\
+        __raw_writel(new_value, (base_address)+offset);\
 }
 
 #define MMUMMU_LOCK_BASE_VALUE_READ32(base_address)\
@@ -151,13 +151,13 @@
 {\
         const u32 offset = MMU_MMU_LOCK_OFFSET;\
         register u32 data = __raw_readl((base_address)+offset);\
-        register u32 newValue = (value);\
-        _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCKBaseValueWrite32);\
+        register u32 new_value = (value);\
+        _DEBUG_LEVEL1_EASI(easil1_mmummu_lock_base_value_write32);\
         data &= ~(MMU_MMU_LOCK_BASE_VALUE_MASK);\
-        newValue <<= MMU_MMU_LOCK_BASE_VALUE_OFFSET;\
-        newValue &= MMU_MMU_LOCK_BASE_VALUE_MASK;\
-        newValue |= data;\
-        __raw_writel(newValue, base_address+offset);\
+        new_value <<= MMU_MMU_LOCK_BASE_VALUE_OFFSET;\
+        new_value &= MMU_MMU_LOCK_BASE_VALUE_MASK;\
+        new_value |= data;\
+        __raw_writel(new_value, base_address+offset);\
 }
 
 #define MMUMMU_LOCK_CURRENT_VICTIM_READ32(base_address)\
@@ -170,13 +170,13 @@
 {\
         const u32 offset = MMU_MMU_LOCK_OFFSET;\
         register u32 data = __raw_readl((base_address)+offset);\
-        register u32 newValue = (value);\
+        register u32 new_value = (value);\
         _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32);\
         data &= ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK);\
-        newValue <<= MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET;\
-        newValue &= MMU_MMU_LOCK_CURRENT_VICTIM_MASK;\
-        newValue |= data;\
-        __raw_writel(newValue, base_address+offset);\
+        new_value <<= MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET;\
+        new_value &= MMU_MMU_LOCK_CURRENT_VICTIM_MASK;\
+        new_value |= data;\
+        __raw_writel(new_value, base_address+offset);\
 }
 
 #define MMUMMU_LOCK_CURRENT_VICTIM_SET32(var, value)\
@@ -192,33 +192,33 @@
 #define MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, value)\
 {\
         const u32 offset = MMU_MMU_LD_TLB_OFFSET;\
-        register u32 newValue = (value);\
+        register u32 new_value = (value);\
         _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32);\
-        __raw_writel(newValue, (base_address)+offset);\
+        __raw_writel(new_value, (base_address)+offset);\
 }
 
 #define MMUMMU_CAM_WRITE_REGISTER32(base_address, value)\
 {\
         const u32 offset = MMU_MMU_CAM_OFFSET;\
-        register u32 newValue = (value);\
+        register u32 new_value = (value);\
         _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CAM_WRITE_REGISTER32);\
-        __raw_writel(newValue, (base_address)+offset);\
+        __raw_writel(new_value, (base_address)+offset);\
 }
 
 #define MMUMMU_RAM_WRITE_REGISTER32(base_address, value)\
 {\
         const u32 offset = MMU_MMU_RAM_OFFSET;\
-        register u32 newValue = (value);\
+        register u32 new_value = (value);\
         _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_RAM_WRITE_REGISTER32);\
-        __raw_writel(newValue, (base_address)+offset);\
+        __raw_writel(new_value, (base_address)+offset);\
 }
 
 #define MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, value)\
 {\
         const u32 offset = MMU_MMU_FLUSH_ENTRY_OFFSET;\
-        register u32 newValue = (value);\
+        register u32 new_value = (value);\
         _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32);\
-        __raw_writel(newValue, (base_address)+offset);\
+        __raw_writel(new_value, (base_address)+offset);\
 }
 
 #endif /* USE_LEVEL_1_MACROS */
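
All of the MMUMMU_*_WRITE32 macros above follow the same read-modify-write pattern: read the register, clear the target field, shift and mask the new value into place, merge it with the untouched bits, and write the result back. The sketch below shows that pattern in plain userspace C against a fake register variable; the field mask and offset are illustrative values, not taken from the OMAP MMU register map.

/*
 * Generic sketch of the read-modify-write pattern used by the
 * MMUMMU_*_WRITE32 macros. The mask/offset describe a made-up 2-bit
 * field at bits [4:3]; __raw_readl()/__raw_writel() are replaced by
 * plain loads/stores of a fake register.
 */
#include <stdio.h>
#include <stdint.h>

#define FIELD_MASK   0x00000018u  /* illustrative field, bits [4:3] */
#define FIELD_OFFSET 3

static uint32_t fake_reg = 0xFFFFFFE7u;

static void field_write32(uint32_t value)
{
        uint32_t data = fake_reg;          /* __raw_readl() in the macros */
        uint32_t new_value = value;

        data &= ~FIELD_MASK;               /* clear the field */
        new_value <<= FIELD_OFFSET;        /* move the value into position */
        new_value &= FIELD_MASK;           /* keep only the field bits */
        new_value |= data;                 /* merge with the untouched bits */
        fake_reg = new_value;              /* __raw_writel() in the macros */
}

int main(void)
{
        field_write32(0x2);                /* set the field to binary 10 */
        printf("0x%08X\n", fake_reg);      /* prints 0xFFFFFFF7 */
        return 0;
}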
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.c b/drivers/staging/tidspbridge/hw/hw_mmu.c
index 705cbe3d50d..969b5fc6537 100644
--- a/drivers/staging/tidspbridge/hw/hw_mmu.c
+++ b/drivers/staging/tidspbridge/hw/hw_mmu.c
@@ -90,7 +90,7 @@ static hw_status mmu_flush_entry(const void __iomem *base_address);
  * Description : It indicates the TLB entry is preserved entry
  * or not
  *
- * Identifier : validBit
+ * Identifier : valid_bit
  * Type : const u32
  * Description : It indicates the TLB entry is valid entry or not
  *
@@ -115,7 +115,7 @@ static hw_status mmu_flush_entry(const void __iomem *base_address);
 static hw_status mmu_set_cam_entry(const void __iomem *base_address,
                 const u32 page_sz,
                 const u32 preserved_bit,
-                const u32 validBit,
+                const u32 valid_bit,
                 const u32 virtual_addr_tag);
 
 /*
@@ -194,11 +194,11 @@ hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
 }
 
 hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
-                u32 victimEntryNum)
+                u32 victim_entry_num)
 {
         hw_status status = RET_OK;
 
-        MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victimEntryNum);
+        MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num);
 
         return status;
 }
@@ -293,7 +293,7 @@ hw_status hw_mmu_twl_disable(const void __iomem *base_address)
         return status;
 }
 
-hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtualAddr,
+hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtual_addr,
                 u32 page_sz)
 {
         hw_status status = RET_OK;
@@ -322,7 +322,7 @@ hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtualAddr,
         }
 
         /* Generate the 20-bit tag from virtual address */
-        virtual_addr_tag = ((virtualAddr & MMU_ADDR_MASK) >> 12);
+        virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
 
         mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag);
 
@@ -333,11 +333,11 @@ hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtualAddr,
 
 hw_status hw_mmu_tlb_add(const void __iomem *base_address,
                 u32 physical_addr,
-                u32 virtualAddr,
+                u32 virtual_addr,
                 u32 page_sz,
                 u32 entry_num,
                 struct hw_mmu_map_attrs_t *map_attrs,
-                s8 preserved_bit, s8 validBit)
+                s8 preserved_bit, s8 valid_bit)
 {
         hw_status status = RET_OK;
         u32 lock_reg;
@@ -377,10 +377,10 @@ hw_status hw_mmu_tlb_add(const void __iomem *base_address,
         lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address);
 
         /* Generate the 20-bit tag from virtual address */
-        virtual_addr_tag = ((virtualAddr & MMU_ADDR_MASK) >> 12);
+        virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
 
         /* Write the fields in the CAM Entry Register */
-        mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, validBit,
+        mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit,
                 virtual_addr_tag);
 
         /* Write the different fields of the RAM Entry Register */
@@ -403,7 +403,7 @@ hw_status hw_mmu_tlb_add(const void __iomem *base_address,
 
 hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
                 u32 physical_addr,
-                u32 virtualAddr,
+                u32 virtual_addr,
                 u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs)
 {
         hw_status status = RET_OK;
@@ -413,7 +413,7 @@ hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
         switch (page_sz) {
         case HW_PAGE_SIZE4KB:
                 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
-                                virtualAddr &
+                                virtual_addr &
                                 MMU_SMALL_PAGE_MASK);
                 pte_val =
                         ((physical_addr & MMU_SMALL_PAGE_MASK) |
@@ -425,7 +425,7 @@ hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
         case HW_PAGE_SIZE64KB:
                 num_entries = 16;
                 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
-                                virtualAddr &
+                                virtual_addr &
                                 MMU_LARGE_PAGE_MASK);
                 pte_val =
                         ((physical_addr & MMU_LARGE_PAGE_MASK) |
@@ -436,7 +436,7 @@ hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
 
         case HW_PAGE_SIZE1MB:
                 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
-                                virtualAddr &
+                                virtual_addr &
                                 MMU_SECTION_ADDR_MASK);
                 pte_val =
                         ((((physical_addr & MMU_SECTION_ADDR_MASK) |
@@ -448,7 +448,7 @@ hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
         case HW_PAGE_SIZE16MB:
                 num_entries = 16;
                 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
-                                virtualAddr &
+                                virtual_addr &
                                 MMU_SSECTION_ADDR_MASK);
                 pte_val =
                         (((physical_addr & MMU_SSECTION_ADDR_MASK) |
@@ -460,7 +460,7 @@ hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
 
         case HW_MMU_COARSE_PAGE_SIZE:
                 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
-                                virtualAddr &
+                                virtual_addr &
                                 MMU_SECTION_ADDR_MASK);
                 pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
                 break;
@@ -475,7 +475,7 @@ hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
         return status;
 }
 
-hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtualAddr, u32 page_size)
+hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size)
 {
         hw_status status = RET_OK;
         u32 pte_addr;
@@ -484,28 +484,28 @@ hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtualAddr, u32 page_size)
         switch (page_size) {
         case HW_PAGE_SIZE4KB:
                 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
-                                virtualAddr &
+                                virtual_addr &
                                 MMU_SMALL_PAGE_MASK);
                 break;
 
         case HW_PAGE_SIZE64KB:
                 num_entries = 16;
                 pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
-                                virtualAddr &
+                                virtual_addr &
                                 MMU_LARGE_PAGE_MASK);
                 break;
 
         case HW_PAGE_SIZE1MB:
         case HW_MMU_COARSE_PAGE_SIZE:
                 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
-                                virtualAddr &
+                                virtual_addr &
                                 MMU_SECTION_ADDR_MASK);
                 break;
 
         case HW_PAGE_SIZE16MB:
                 num_entries = 16;
                 pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
-                                virtualAddr &
+                                virtual_addr &
                                 MMU_SSECTION_ADDR_MASK);
                 break;
 
@@ -539,7 +539,7 @@ static hw_status mmu_flush_entry(const void __iomem *base_address)
 static hw_status mmu_set_cam_entry(const void __iomem *base_address,
                 const u32 page_sz,
                 const u32 preserved_bit,
-                const u32 validBit,
+                const u32 valid_bit,
                 const u32 virtual_addr_tag)
 {
         hw_status status = RET_OK;
@@ -550,7 +550,7 @@ static hw_status mmu_set_cam_entry(const void __iomem *base_address,
                 RES_MMU_BASE + RES_INVALID_INPUT_PARAM);
 
         mmu_cam_reg = (virtual_addr_tag << 12);
-        mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (validBit << 2) |
+        mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) |
                 (preserved_bit << 3);
 
         /* write values to register */
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.h b/drivers/staging/tidspbridge/hw/hw_mmu.h
index 554b52eff43..6ba133e6a21 100644
--- a/drivers/staging/tidspbridge/hw/hw_mmu.h
+++ b/drivers/staging/tidspbridge/hw/hw_mmu.h
@@ -50,7 +50,7 @@ extern hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
                 u32 num_locked_entries);
 
 extern hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
-                u32 victimEntryNum);
+                u32 victim_entry_num);
 
 /* For MMU faults */
 extern hw_status hw_mmu_event_ack(const void __iomem *base_address,
@@ -77,45 +77,45 @@ extern hw_status hw_mmu_twl_enable(const void __iomem *base_address);
 extern hw_status hw_mmu_twl_disable(const void __iomem *base_address);
 
 extern hw_status hw_mmu_tlb_flush(const void __iomem *base_address,
-                u32 virtualAddr, u32 page_sz);
+                u32 virtual_addr, u32 page_sz);
 
 extern hw_status hw_mmu_tlb_add(const void __iomem *base_address,
                 u32 physical_addr,
-                u32 virtualAddr,
+                u32 virtual_addr,
                 u32 page_sz,
                 u32 entry_num,
                 struct hw_mmu_map_attrs_t *map_attrs,
-                s8 preserved_bit, s8 validBit);
+                s8 preserved_bit, s8 valid_bit);
 
 /* For PTEs */
 extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
                 u32 physical_addr,
-                u32 virtualAddr,
+                u32 virtual_addr,
                 u32 page_sz,
                 struct hw_mmu_map_attrs_t *map_attrs);
 
 extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
-                u32 page_size, u32 virtualAddr);
+                u32 page_size, u32 virtual_addr);
 
 void hw_mmu_tlb_flush_all(const void __iomem *base);
 
-static inline u32 hw_mmu_pte_addr_l1(u32 L1_base, u32 va)
+static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va)
 {
         u32 pte_addr;
         u32 va31_to20;
 
         va31_to20 = va >> (20 - 2); /* Left-shift by 2 here itself */
         va31_to20 &= 0xFFFFFFFCUL;
-        pte_addr = L1_base + va31_to20;
+        pte_addr = l1_base + va31_to20;
 
         return pte_addr;
 }
 
-static inline u32 hw_mmu_pte_addr_l2(u32 L2_base, u32 va)
+static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va)
 {
         u32 pte_addr;
 
-        pte_addr = (L2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);
+        pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);
 
         return pte_addr;
 }
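
The two inline helpers at the end of the hw_mmu.h hunk are only touched by the L1_base/L2_base renames, so their address arithmetic can be checked in isolation. The sketch below copies them, as they appear after the rename, into a small userspace program; the table-base and virtual-address values are made-up sample inputs.

/*
 * Standalone sketch of the renamed PTE-address helpers from hw_mmu.h.
 * The helper bodies mirror the hunk above; the sample inputs in main()
 * are arbitrary.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va)
{
        u32 va31_to20;

        va31_to20 = va >> (20 - 2);     /* Left-shift by 2 here itself */
        va31_to20 &= 0xFFFFFFFCUL;
        return l1_base + va31_to20;
}

static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va)
{
        return (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);
}

int main(void)
{
        /* L1 descriptor address for a sample VA with an L1 table at 0x80000000 */
        printf("L1 PTE addr: 0x%08X\n",
               hw_mmu_pte_addr_l1(0x80000000u, 0x12345678u));
        /* L2 descriptor address for the same VA with an L2 table at 0x80004000 */
        printf("L2 PTE addr: 0x%08X\n",
               hw_mmu_pte_addr_l2(0x80004000u, 0x12345678u));
        return 0;
}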
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cmm.h b/drivers/staging/tidspbridge/include/dspbridge/cmm.h
index 3944a1e505c..086ca2568ed 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cmm.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cmm.h
@@ -370,17 +370,17 @@ extern int cmm_xlator_info(struct cmm_xlatorobject *xlator,
  * Parameters:
  * xlator: handle to translator.
  * paddr address of buffer to translate.
- * xType Type of address xlation. CMM_PA2VA or CMM_VA2PA.
+ * xtype Type of address xlation. CMM_PA2VA or CMM_VA2PA.
  * Returns:
  * Valid address on success, else NULL.
  * Requires:
  * refs > 0
  * paddr != NULL
- * xType >= CMM_VA2PA) && (xType <= CMM_DSPPA2PA)
+ * xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA)
  * Ensures:
  *
  */
 extern void *cmm_xlator_translate(struct cmm_xlatorobject *xlator,
-                void *paddr, enum cmm_xlatetype xType);
+                void *paddr, enum cmm_xlatetype xtype);
 
 #endif /* CMM_ */
diff --git a/drivers/staging/tidspbridge/pmgr/cmm.c b/drivers/staging/tidspbridge/pmgr/cmm.c
index 2381984e559..d054e5389eb 100644
--- a/drivers/staging/tidspbridge/pmgr/cmm.c
+++ b/drivers/staging/tidspbridge/pmgr/cmm.c
@@ -1103,7 +1103,7 @@ int cmm_xlator_info(struct cmm_xlatorobject *xlator, IN OUT u8 ** paddr,
  * ======== cmm_xlator_translate ========
  */
 void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
-                enum cmm_xlatetype xType)
+                enum cmm_xlatetype xtype)
 {
         u32 dw_addr_xlate = 0;
         struct cmm_xlator *xlator_obj = (struct cmm_xlator *)xlator;
@@ -1113,7 +1113,7 @@ void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
 
         DBC_REQUIRE(refs > 0);
         DBC_REQUIRE(paddr != NULL);
-        DBC_REQUIRE((xType >= CMM_VA2PA) && (xType <= CMM_DSPPA2PA));
+        DBC_REQUIRE((xtype >= CMM_VA2PA) && (xtype <= CMM_DSPPA2PA));
 
         if (!xlator_obj)
                 goto loop_cont;
@@ -1125,9 +1125,9 @@ void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
         if (!allocator)
                 goto loop_cont;
 
-        if ((xType == CMM_VA2DSPPA) || (xType == CMM_VA2PA) ||
-            (xType == CMM_PA2VA)) {
-                if (xType == CMM_PA2VA) {
+        if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_VA2PA) ||
+            (xtype == CMM_PA2VA)) {
+                if (xtype == CMM_PA2VA) {
                         /* Gpp Va = Va Base + offset */
                         dw_offset = (u8 *) paddr - (u8 *) (allocator->shm_base -
                                                            allocator->
@@ -1152,14 +1152,14 @@ void *cmm_xlator_translate(struct cmm_xlatorobject *xlator, void *paddr,
                 dw_addr_xlate = (u32) paddr;
         }
         /*Now convert address to proper target physical address if needed */
-        if ((xType == CMM_VA2DSPPA) || (xType == CMM_PA2DSPPA)) {
+        if ((xtype == CMM_VA2DSPPA) || (xtype == CMM_PA2DSPPA)) {
                 /* Got Gpp Pa now, convert to DSP Pa */
                 dw_addr_xlate =
                     GPPPA2DSPPA((allocator->shm_base - allocator->ul_dsp_size),
                                 dw_addr_xlate,
                                 allocator->dw_dsp_phys_addr_offset *
                                 allocator->c_factor);
-        } else if (xType == CMM_DSPPA2PA) {
+        } else if (xtype == CMM_DSPPA2PA) {
                 /* Got DSP Pa, convert to GPP Pa */
                 dw_addr_xlate =
                     DSPPA2GPPPA(allocator->shm_base - allocator->ul_dsp_size,