author     Dean Nelson <dcn@sgi.com>                          2008-05-12 17:02:02 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>     2008-05-13 11:02:23 -0400
commit     65c17b801e03e40acdca0cd34e8eb1b8a347b539
tree       fe5c5ccb73604d6cbd88525f930b50b8435a71a7  /drivers/misc
parent     0cf942d75a6acfa11a41f63330d8780901eda4af
drivers/misc/sgi-xp: clean up return values
Make the XP return values generic to XP rather than tied to XPC by renaming
enum xpc_retval to xp_retval and changing the return value prefix from xpc to
xp. Also clean up a comment block that referenced some of these return values,
as well as the handling of BTE-related return values.
Signed-off-by: Dean Nelson <dcn@sgi.com>
Acked-by: Robin Holt <holt@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/misc')
-rw-r--r--  drivers/misc/sgi-xp/xp.h             291
-rw-r--r--  drivers/misc/sgi-xp/xp_main.c         38
-rw-r--r--  drivers/misc/sgi-xp/xpc.h             71
-rw-r--r--  drivers/misc/sgi-xp/xpc_channel.c    166
-rw-r--r--  drivers/misc/sgi-xp/xpc_main.c        44
-rw-r--r--  drivers/misc/sgi-xp/xpc_partition.c   64
-rw-r--r--  drivers/misc/sgi-xp/xpnet.c           18
7 files changed, 291 insertions, 401 deletions
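For orientation, the rename is mechanical for XP consumers: the return type becomes enum xp_retval and every value drops the trailing "c" from its prefix (xpcSuccess becomes xpSuccess, and so on). A minimal caller sketch against the post-patch xp.h; the function name and payload handling are made up, and XPC_NOWAIT is the existing allocate flag from xp.h:

```c
/* Hypothetical consumer sketch, not part of this patch; assumes the
 * post-patch declarations in drivers/misc/sgi-xp/xp.h. */
static enum xp_retval			/* was: enum xpc_retval */
my_send_hello(partid_t partid, int ch_number)
{
	enum xp_retval ret;
	void *payload;

	ret = xpc_allocate(partid, ch_number, XPC_NOWAIT, &payload);
	if (ret != xpSuccess)		/* was: xpcSuccess */
		return ret;

	/* ... fill in the payload here ... */

	ret = xpc_send(partid, ch_number, payload);
	if (ret == xpNotConnected)	/* was: xpcNotConnected */
		printk(KERN_DEBUG "channel %d to partition %d not connected\n",
		       ch_number, partid);
	return ret;
}
```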
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index 5515234be86a..a258fa6705c7 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -157,215 +157,136 @@ struct xpc_msg { | |||
157 | /* | 157 | /* |
158 | * Define the return values and values passed to user's callout functions. | 158 | * Define the return values and values passed to user's callout functions. |
159 | * (It is important to add new value codes at the end just preceding | 159 | * (It is important to add new value codes at the end just preceding |
160 | * xpcUnknownReason, which must have the highest numerical value.) | 160 | * xpUnknownReason, which must have the highest numerical value.) |
161 | */ | 161 | */ |
162 | enum xpc_retval { | 162 | enum xp_retval { |
163 | xpcSuccess = 0, | 163 | xpSuccess = 0, |
164 | 164 | ||
165 | xpcNotConnected, /* 1: channel is not connected */ | 165 | xpNotConnected, /* 1: channel is not connected */ |
166 | xpcConnected, /* 2: channel connected (opened) */ | 166 | xpConnected, /* 2: channel connected (opened) */ |
167 | xpcRETIRED1, /* 3: (formerly xpcDisconnected) */ | 167 | xpRETIRED1, /* 3: (formerly xpDisconnected) */ |
168 | 168 | ||
169 | xpcMsgReceived, /* 4: message received */ | 169 | xpMsgReceived, /* 4: message received */ |
170 | xpcMsgDelivered, /* 5: message delivered and acknowledged */ | 170 | xpMsgDelivered, /* 5: message delivered and acknowledged */ |
171 | 171 | ||
172 | xpcRETIRED2, /* 6: (formerly xpcTransferFailed) */ | 172 | xpRETIRED2, /* 6: (formerly xpTransferFailed) */ |
173 | 173 | ||
174 | xpcNoWait, /* 7: operation would require wait */ | 174 | xpNoWait, /* 7: operation would require wait */ |
175 | xpcRetry, /* 8: retry operation */ | 175 | xpRetry, /* 8: retry operation */ |
176 | xpcTimeout, /* 9: timeout in xpc_allocate_msg_wait() */ | 176 | xpTimeout, /* 9: timeout in xpc_allocate_msg_wait() */ |
177 | xpcInterrupted, /* 10: interrupted wait */ | 177 | xpInterrupted, /* 10: interrupted wait */ |
178 | 178 | ||
179 | xpcUnequalMsgSizes, /* 11: message size disparity between sides */ | 179 | xpUnequalMsgSizes, /* 11: message size disparity between sides */ |
180 | xpcInvalidAddress, /* 12: invalid address */ | 180 | xpInvalidAddress, /* 12: invalid address */ |
181 | 181 | ||
182 | xpcNoMemory, /* 13: no memory available for XPC structures */ | 182 | xpNoMemory, /* 13: no memory available for XPC structures */ |
183 | xpcLackOfResources, /* 14: insufficient resources for operation */ | 183 | xpLackOfResources, /* 14: insufficient resources for operation */ |
184 | xpcUnregistered, /* 15: channel is not registered */ | 184 | xpUnregistered, /* 15: channel is not registered */ |
185 | xpcAlreadyRegistered, /* 16: channel is already registered */ | 185 | xpAlreadyRegistered, /* 16: channel is already registered */ |
186 | 186 | ||
187 | xpcPartitionDown, /* 17: remote partition is down */ | 187 | xpPartitionDown, /* 17: remote partition is down */ |
188 | xpcNotLoaded, /* 18: XPC module is not loaded */ | 188 | xpNotLoaded, /* 18: XPC module is not loaded */ |
189 | xpcUnloading, /* 19: this side is unloading XPC module */ | 189 | xpUnloading, /* 19: this side is unloading XPC module */ |
190 | 190 | ||
191 | xpcBadMagic, /* 20: XPC MAGIC string not found */ | 191 | xpBadMagic, /* 20: XPC MAGIC string not found */ |
192 | 192 | ||
193 | xpcReactivating, /* 21: remote partition was reactivated */ | 193 | xpReactivating, /* 21: remote partition was reactivated */ |
194 | 194 | ||
195 | xpcUnregistering, /* 22: this side is unregistering channel */ | 195 | xpUnregistering, /* 22: this side is unregistering channel */ |
196 | xpcOtherUnregistering, /* 23: other side is unregistering channel */ | 196 | xpOtherUnregistering, /* 23: other side is unregistering channel */ |
197 | 197 | ||
198 | xpcCloneKThread, /* 24: cloning kernel thread */ | 198 | xpCloneKThread, /* 24: cloning kernel thread */ |
199 | xpcCloneKThreadFailed, /* 25: cloning kernel thread failed */ | 199 | xpCloneKThreadFailed, /* 25: cloning kernel thread failed */ |
200 | 200 | ||
201 | xpcNoHeartbeat, /* 26: remote partition has no heartbeat */ | 201 | xpNoHeartbeat, /* 26: remote partition has no heartbeat */ |
202 | 202 | ||
203 | xpcPioReadError, /* 27: PIO read error */ | 203 | xpPioReadError, /* 27: PIO read error */ |
204 | xpcPhysAddrRegFailed, /* 28: registration of phys addr range failed */ | 204 | xpPhysAddrRegFailed, /* 28: registration of phys addr range failed */ |
205 | 205 | ||
206 | xpcBteDirectoryError, /* 29: maps to BTEFAIL_DIR */ | 206 | xpRETIRED3, /* 29: (formerly xpBteDirectoryError) */ |
207 | xpcBtePoisonError, /* 30: maps to BTEFAIL_POISON */ | 207 | xpRETIRED4, /* 30: (formerly xpBtePoisonError) */ |
208 | xpcBteWriteError, /* 31: maps to BTEFAIL_WERR */ | 208 | xpRETIRED5, /* 31: (formerly xpBteWriteError) */ |
209 | xpcBteAccessError, /* 32: maps to BTEFAIL_ACCESS */ | 209 | xpRETIRED6, /* 32: (formerly xpBteAccessError) */ |
210 | xpcBtePWriteError, /* 33: maps to BTEFAIL_PWERR */ | 210 | xpRETIRED7, /* 33: (formerly xpBtePWriteError) */ |
211 | xpcBtePReadError, /* 34: maps to BTEFAIL_PRERR */ | 211 | xpRETIRED8, /* 34: (formerly xpBtePReadError) */ |
212 | xpcBteTimeOutError, /* 35: maps to BTEFAIL_TOUT */ | 212 | xpRETIRED9, /* 35: (formerly xpBteTimeOutError) */ |
213 | xpcBteXtalkError, /* 36: maps to BTEFAIL_XTERR */ | 213 | xpRETIRED10, /* 36: (formerly xpBteXtalkError) */ |
214 | xpcBteNotAvailable, /* 37: maps to BTEFAIL_NOTAVAIL */ | 214 | xpRETIRED11, /* 37: (formerly xpBteNotAvailable) */ |
215 | xpcBteUnmappedError, /* 38: unmapped BTEFAIL_ error */ | 215 | xpRETIRED12, /* 38: (formerly xpBteUnmappedError) */ |
216 | 216 | ||
217 | xpcBadVersion, /* 39: bad version number */ | 217 | xpBadVersion, /* 39: bad version number */ |
218 | xpcVarsNotSet, /* 40: the XPC variables are not set up */ | 218 | xpVarsNotSet, /* 40: the XPC variables are not set up */ |
219 | xpcNoRsvdPageAddr, /* 41: unable to get rsvd page's phys addr */ | 219 | xpNoRsvdPageAddr, /* 41: unable to get rsvd page's phys addr */ |
220 | xpcInvalidPartid, /* 42: invalid partition ID */ | 220 | xpInvalidPartid, /* 42: invalid partition ID */ |
221 | xpcLocalPartid, /* 43: local partition ID */ | 221 | xpLocalPartid, /* 43: local partition ID */ |
222 | 222 | ||
223 | xpcOtherGoingDown, /* 44: other side going down, reason unknown */ | 223 | xpOtherGoingDown, /* 44: other side going down, reason unknown */ |
224 | xpcSystemGoingDown, /* 45: system is going down, reason unknown */ | 224 | xpSystemGoingDown, /* 45: system is going down, reason unknown */ |
225 | xpcSystemHalt, /* 46: system is being halted */ | 225 | xpSystemHalt, /* 46: system is being halted */ |
226 | xpcSystemReboot, /* 47: system is being rebooted */ | 226 | xpSystemReboot, /* 47: system is being rebooted */ |
227 | xpcSystemPoweroff, /* 48: system is being powered off */ | 227 | xpSystemPoweroff, /* 48: system is being powered off */ |
228 | 228 | ||
229 | xpcDisconnecting, /* 49: channel disconnecting (closing) */ | 229 | xpDisconnecting, /* 49: channel disconnecting (closing) */ |
230 | 230 | ||
231 | xpcOpenCloseError, /* 50: channel open/close protocol error */ | 231 | xpOpenCloseError, /* 50: channel open/close protocol error */ |
232 | 232 | ||
233 | xpcDisconnected, /* 51: channel disconnected (closed) */ | 233 | xpDisconnected, /* 51: channel disconnected (closed) */ |
234 | 234 | ||
235 | xpcBteSh2Start, /* 52: BTE CRB timeout */ | 235 | xpBteCopyError, /* 52: bte_copy() returned error */ |
236 | 236 | ||
237 | /* 53: 0x1 BTE Error Response Short */ | 237 | xpUnknownReason /* 53: unknown reason - must be last in enum */ |
238 | xpcBteSh2RspShort = xpcBteSh2Start + BTEFAIL_SH2_RESP_SHORT, | ||
239 | |||
240 | /* 54: 0x2 BTE Error Response Long */ | ||
241 | xpcBteSh2RspLong = xpcBteSh2Start + BTEFAIL_SH2_RESP_LONG, | ||
242 | |||
243 | /* 56: 0x4 BTE Error Response DSB */ | ||
244 | xpcBteSh2RspDSB = xpcBteSh2Start + BTEFAIL_SH2_RESP_DSP, | ||
245 | |||
246 | /* 60: 0x8 BTE Error Response Access */ | ||
247 | xpcBteSh2RspAccess = xpcBteSh2Start + BTEFAIL_SH2_RESP_ACCESS, | ||
248 | |||
249 | /* 68: 0x10 BTE Error CRB timeout */ | ||
250 | xpcBteSh2CRBTO = xpcBteSh2Start + BTEFAIL_SH2_CRB_TO, | ||
251 | |||
252 | /* 84: 0x20 BTE Error NACK limit */ | ||
253 | xpcBteSh2NACKLimit = xpcBteSh2Start + BTEFAIL_SH2_NACK_LIMIT, | ||
254 | |||
255 | /* 115: BTE end */ | ||
256 | xpcBteSh2End = xpcBteSh2Start + BTEFAIL_SH2_ALL, | ||
257 | |||
258 | xpcUnknownReason /* 116: unknown reason - must be last in enum */ | ||
259 | }; | 238 | }; |
260 | 239 | ||
261 | /* | 240 | /* |
262 | * Define the callout function types used by XPC to update the user on | 241 | * Define the callout function type used by XPC to update the user on |
263 | * connection activity and state changes (via the user function registered by | 242 | * connection activity and state changes via the user function registered |
264 | * xpc_connect()) and to notify them of messages received and delivered (via | 243 | * by xpc_connect(). |
265 | * the user function registered by xpc_send_notify()). | ||
266 | * | ||
267 | * The two function types are xpc_channel_func and xpc_notify_func and | ||
268 | * both share the following arguments, with the exception of "data", which | ||
269 | * only xpc_channel_func has. | ||
270 | * | 244 | * |
271 | * Arguments: | 245 | * Arguments: |
272 | * | 246 | * |
273 | * reason - reason code. (See following table.) | 247 | * reason - reason code. |
274 | * partid - partition ID associated with condition. | 248 | * partid - partition ID associated with condition. |
275 | * ch_number - channel # associated with condition. | 249 | * ch_number - channel # associated with condition. |
276 | * data - pointer to optional data. (See following table.) | 250 | * data - pointer to optional data. |
277 | * key - pointer to optional user-defined value provided as the "key" | 251 | * key - pointer to optional user-defined value provided as the "key" |
278 | * argument to xpc_connect() or xpc_send_notify(). | 252 | * argument to xpc_connect(). |
279 | * | 253 | * |
280 | * In the following table the "Optional Data" column applies to callouts made | 254 | * A reason code of xpConnected indicates that a connection has been |
281 | * to functions registered by xpc_connect(). A "NA" in that column indicates | 255 | * established to the specified partition on the specified channel. The data |
282 | * that this reason code can be passed to functions registered by | 256 | * argument indicates the max number of entries allowed in the message queue. |
283 | * xpc_send_notify() (i.e. they don't have data arguments). | ||
284 | * | 257 | * |
285 | * Also, the first three reason codes in the following table indicate | 258 | * A reason code of xpMsgReceived indicates that a XPC message arrived from |
286 | * success, whereas the others indicate failure. When a failure reason code | 259 | * the specified partition on the specified channel. The data argument |
287 | * is received, one can assume that the channel is not connected. | 260 | * specifies the address of the message's payload. The user must call |
261 | * xpc_received() when finished with the payload. | ||
288 | * | 262 | * |
289 | * | 263 | * All other reason codes indicate failure. The data argmument is NULL. |
290 | * Reason Code | Cause | Optional Data | 264 | * When a failure reason code is received, one can assume that the channel |
291 | * =====================+================================+===================== | 265 | * is not connected. |
292 | * xpcConnected | connection has been established| max #of entries | ||
293 | * | to the specified partition on | allowed in message | ||
294 | * | the specified channel | queue | ||
295 | * ---------------------+--------------------------------+--------------------- | ||
296 | * xpcMsgReceived | an XPC message arrived from | address of payload | ||
297 | * | the specified partition on the | | ||
298 | * | specified channel | [the user must call | ||
299 | * | | xpc_received() when | ||
300 | * | | finished with the | ||
301 | * | | payload] | ||
302 | * ---------------------+--------------------------------+--------------------- | ||
303 | * xpcMsgDelivered | notification that the message | NA | ||
304 | * | was delivered to the intended | | ||
305 | * | recipient and that they have | | ||
306 | * | acknowledged its receipt by | | ||
307 | * | calling xpc_received() | | ||
308 | * =====================+================================+===================== | ||
309 | * xpcUnequalMsgSizes | can't connect to the specified | NULL | ||
310 | * | partition on the specified | | ||
311 | * | channel because of mismatched | | ||
312 | * | message sizes | | ||
313 | * ---------------------+--------------------------------+--------------------- | ||
314 | * xpcNoMemory | insufficient memory avaiable | NULL | ||
315 | * | to allocate message queue | | ||
316 | * ---------------------+--------------------------------+--------------------- | ||
317 | * xpcLackOfResources | lack of resources to create | NULL | ||
318 | * | the necessary kthreads to | | ||
319 | * | support the channel | | ||
320 | * ---------------------+--------------------------------+--------------------- | ||
321 | * xpcUnregistering | this side's user has | NULL or NA | ||
322 | * | unregistered by calling | | ||
323 | * | xpc_disconnect() | | ||
324 | * ---------------------+--------------------------------+--------------------- | ||
325 | * xpcOtherUnregistering| the other side's user has | NULL or NA | ||
326 | * | unregistered by calling | | ||
327 | * | xpc_disconnect() | | ||
328 | * ---------------------+--------------------------------+--------------------- | ||
329 | * xpcNoHeartbeat | the other side's XPC is no | NULL or NA | ||
330 | * | longer heartbeating | | ||
331 | * | | | ||
332 | * ---------------------+--------------------------------+--------------------- | ||
333 | * xpcUnloading | this side's XPC module is | NULL or NA | ||
334 | * | being unloaded | | ||
335 | * | | | ||
336 | * ---------------------+--------------------------------+--------------------- | ||
337 | * xpcOtherUnloading | the other side's XPC module is | NULL or NA | ||
338 | * | is being unloaded | | ||
339 | * | | | ||
340 | * ---------------------+--------------------------------+--------------------- | ||
341 | * xpcPioReadError | xp_nofault_PIOR() returned an | NULL or NA | ||
342 | * | error while sending an IPI | | ||
343 | * | | | ||
344 | * ---------------------+--------------------------------+--------------------- | ||
345 | * xpcInvalidAddress | the address either received or | NULL or NA | ||
346 | * | sent by the specified partition| | ||
347 | * | is invalid | | ||
348 | * ---------------------+--------------------------------+--------------------- | ||
349 | * xpcBteNotAvailable | attempt to pull data from the | NULL or NA | ||
350 | * xpcBtePoisonError | specified partition over the | | ||
351 | * xpcBteWriteError | specified channel via a | | ||
352 | * xpcBteAccessError | bte_copy() failed | | ||
353 | * xpcBteTimeOutError | | | ||
354 | * xpcBteXtalkError | | | ||
355 | * xpcBteDirectoryError | | | ||
356 | * xpcBteGenericError | | | ||
357 | * xpcBteUnmappedError | | | ||
358 | * ---------------------+--------------------------------+--------------------- | ||
359 | * xpcUnknownReason | the specified channel to the | NULL or NA | ||
360 | * | specified partition was | | ||
361 | * | unavailable for unknown reasons| | ||
362 | * =====================+================================+===================== | ||
363 | */ | 266 | */ |
364 | 267 | typedef void (*xpc_channel_func) (enum xp_retval reason, partid_t partid, | |
365 | typedef void (*xpc_channel_func) (enum xpc_retval reason, partid_t partid, | ||
366 | int ch_number, void *data, void *key); | 268 | int ch_number, void *data, void *key); |
367 | 269 | ||
368 | typedef void (*xpc_notify_func) (enum xpc_retval reason, partid_t partid, | 270 | /* |
271 | * Define the callout function type used by XPC to notify the user of | ||
272 | * messages received and delivered via the user function registered by | ||
273 | * xpc_send_notify(). | ||
274 | * | ||
275 | * Arguments: | ||
276 | * | ||
277 | * reason - reason code. | ||
278 | * partid - partition ID associated with condition. | ||
279 | * ch_number - channel # associated with condition. | ||
280 | * key - pointer to optional user-defined value provided as the "key" | ||
281 | * argument to xpc_send_notify(). | ||
282 | * | ||
283 | * A reason code of xpMsgDelivered indicates that the message was delivered | ||
284 | * to the intended recipient and that they have acknowledged its receipt by | ||
285 | * calling xpc_received(). | ||
286 | * | ||
287 | * All other reason codes indicate failure. | ||
288 | */ | ||
289 | typedef void (*xpc_notify_func) (enum xp_retval reason, partid_t partid, | ||
369 | int ch_number, void *key); | 290 | int ch_number, void *key); |
370 | 291 | ||
371 | /* | 292 | /* |
@@ -401,43 +322,43 @@ struct xpc_registration { | |||
401 | struct xpc_interface { | 322 | struct xpc_interface { |
402 | void (*connect) (int); | 323 | void (*connect) (int); |
403 | void (*disconnect) (int); | 324 | void (*disconnect) (int); |
404 | enum xpc_retval (*allocate) (partid_t, int, u32, void **); | 325 | enum xp_retval (*allocate) (partid_t, int, u32, void **); |
405 | enum xpc_retval (*send) (partid_t, int, void *); | 326 | enum xp_retval (*send) (partid_t, int, void *); |
406 | enum xpc_retval (*send_notify) (partid_t, int, void *, | 327 | enum xp_retval (*send_notify) (partid_t, int, void *, |
407 | xpc_notify_func, void *); | 328 | xpc_notify_func, void *); |
408 | void (*received) (partid_t, int, void *); | 329 | void (*received) (partid_t, int, void *); |
409 | enum xpc_retval (*partid_to_nasids) (partid_t, void *); | 330 | enum xp_retval (*partid_to_nasids) (partid_t, void *); |
410 | }; | 331 | }; |
411 | 332 | ||
412 | extern struct xpc_interface xpc_interface; | 333 | extern struct xpc_interface xpc_interface; |
413 | 334 | ||
414 | extern void xpc_set_interface(void (*)(int), | 335 | extern void xpc_set_interface(void (*)(int), |
415 | void (*)(int), | 336 | void (*)(int), |
416 | enum xpc_retval (*)(partid_t, int, u32, void **), | 337 | enum xp_retval (*)(partid_t, int, u32, void **), |
417 | enum xpc_retval (*)(partid_t, int, void *), | 338 | enum xp_retval (*)(partid_t, int, void *), |
418 | enum xpc_retval (*)(partid_t, int, void *, | 339 | enum xp_retval (*)(partid_t, int, void *, |
419 | xpc_notify_func, void *), | 340 | xpc_notify_func, void *), |
420 | void (*)(partid_t, int, void *), | 341 | void (*)(partid_t, int, void *), |
421 | enum xpc_retval (*)(partid_t, void *)); | 342 | enum xp_retval (*)(partid_t, void *)); |
422 | extern void xpc_clear_interface(void); | 343 | extern void xpc_clear_interface(void); |
423 | 344 | ||
424 | extern enum xpc_retval xpc_connect(int, xpc_channel_func, void *, u16, | 345 | extern enum xp_retval xpc_connect(int, xpc_channel_func, void *, u16, |
425 | u16, u32, u32); | 346 | u16, u32, u32); |
426 | extern void xpc_disconnect(int); | 347 | extern void xpc_disconnect(int); |
427 | 348 | ||
428 | static inline enum xpc_retval | 349 | static inline enum xp_retval |
429 | xpc_allocate(partid_t partid, int ch_number, u32 flags, void **payload) | 350 | xpc_allocate(partid_t partid, int ch_number, u32 flags, void **payload) |
430 | { | 351 | { |
431 | return xpc_interface.allocate(partid, ch_number, flags, payload); | 352 | return xpc_interface.allocate(partid, ch_number, flags, payload); |
432 | } | 353 | } |
433 | 354 | ||
434 | static inline enum xpc_retval | 355 | static inline enum xp_retval |
435 | xpc_send(partid_t partid, int ch_number, void *payload) | 356 | xpc_send(partid_t partid, int ch_number, void *payload) |
436 | { | 357 | { |
437 | return xpc_interface.send(partid, ch_number, payload); | 358 | return xpc_interface.send(partid, ch_number, payload); |
438 | } | 359 | } |
439 | 360 | ||
440 | static inline enum xpc_retval | 361 | static inline enum xp_retval |
441 | xpc_send_notify(partid_t partid, int ch_number, void *payload, | 362 | xpc_send_notify(partid_t partid, int ch_number, void *payload, |
442 | xpc_notify_func func, void *key) | 363 | xpc_notify_func func, void *key) |
443 | { | 364 | { |
@@ -450,7 +371,7 @@ xpc_received(partid_t partid, int ch_number, void *payload) | |||
450 | return xpc_interface.received(partid, ch_number, payload); | 371 | return xpc_interface.received(partid, ch_number, payload); |
451 | } | 372 | } |
452 | 373 | ||
453 | static inline enum xpc_retval | 374 | static inline enum xp_retval |
454 | xpc_partid_to_nasids(partid_t partid, void *nasids) | 375 | xpc_partid_to_nasids(partid_t partid, void *nasids) |
455 | { | 376 | { |
456 | return xpc_interface.partid_to_nasids(partid, nasids); | 377 | return xpc_interface.partid_to_nasids(partid, nasids); |
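The rewritten comment blocks above replace the old reason-code table with prose: for xpConnected the data argument carries the maximum number of message-queue entries, for xpMsgReceived it carries the payload address (which must be released with xpc_received()), and every other reason code means the channel is not connected. A hedged sketch of a channel callout following that contract; the function and its process_payload() helper are hypothetical:

```c
/* Hypothetical xpc_channel_func implementation, not part of this patch. */
static void
my_channel_callout(enum xp_retval reason, partid_t partid, int ch_number,
		   void *data, void *key)
{
	switch (reason) {
	case xpConnected:
		/* data holds the max #of entries allowed in the message queue */
		printk(KERN_INFO "partid=%d channel=%d connected, %ld entries\n",
		       partid, ch_number, (long)(u64)data);
		break;
	case xpMsgReceived:
		/* data is the address of the message's payload */
		process_payload(data);			/* hypothetical helper */
		xpc_received(partid, ch_number, data);	/* required when done */
		break;
	default:
		/* all other reason codes indicate failure; the channel is
		 * not connected */
		printk(KERN_WARNING "partid=%d channel=%d down, reason=%d\n",
		       partid, ch_number, reason);
		break;
	}
}
```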
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index 1fbf99bae963..0eadaaa6b0ea 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -42,21 +42,21 @@ EXPORT_SYMBOL_GPL(xpc_registrations); | |||
42 | /* | 42 | /* |
43 | * Initialize the XPC interface to indicate that XPC isn't loaded. | 43 | * Initialize the XPC interface to indicate that XPC isn't loaded. |
44 | */ | 44 | */ |
45 | static enum xpc_retval | 45 | static enum xp_retval |
46 | xpc_notloaded(void) | 46 | xpc_notloaded(void) |
47 | { | 47 | { |
48 | return xpcNotLoaded; | 48 | return xpNotLoaded; |
49 | } | 49 | } |
50 | 50 | ||
51 | struct xpc_interface xpc_interface = { | 51 | struct xpc_interface xpc_interface = { |
52 | (void (*)(int))xpc_notloaded, | 52 | (void (*)(int))xpc_notloaded, |
53 | (void (*)(int))xpc_notloaded, | 53 | (void (*)(int))xpc_notloaded, |
54 | (enum xpc_retval(*)(partid_t, int, u32, void **))xpc_notloaded, | 54 | (enum xp_retval(*)(partid_t, int, u32, void **))xpc_notloaded, |
55 | (enum xpc_retval(*)(partid_t, int, void *))xpc_notloaded, | 55 | (enum xp_retval(*)(partid_t, int, void *))xpc_notloaded, |
56 | (enum xpc_retval(*)(partid_t, int, void *, xpc_notify_func, void *)) | 56 | (enum xp_retval(*)(partid_t, int, void *, xpc_notify_func, void *)) |
57 | xpc_notloaded, | 57 | xpc_notloaded, |
58 | (void (*)(partid_t, int, void *))xpc_notloaded, | 58 | (void (*)(partid_t, int, void *))xpc_notloaded, |
59 | (enum xpc_retval(*)(partid_t, void *))xpc_notloaded | 59 | (enum xp_retval(*)(partid_t, void *))xpc_notloaded |
60 | }; | 60 | }; |
61 | EXPORT_SYMBOL_GPL(xpc_interface); | 61 | EXPORT_SYMBOL_GPL(xpc_interface); |
62 | 62 | ||
@@ -66,12 +66,12 @@ EXPORT_SYMBOL_GPL(xpc_interface); | |||
66 | void | 66 | void |
67 | xpc_set_interface(void (*connect) (int), | 67 | xpc_set_interface(void (*connect) (int), |
68 | void (*disconnect) (int), | 68 | void (*disconnect) (int), |
69 | enum xpc_retval (*allocate) (partid_t, int, u32, void **), | 69 | enum xp_retval (*allocate) (partid_t, int, u32, void **), |
70 | enum xpc_retval (*send) (partid_t, int, void *), | 70 | enum xp_retval (*send) (partid_t, int, void *), |
71 | enum xpc_retval (*send_notify) (partid_t, int, void *, | 71 | enum xp_retval (*send_notify) (partid_t, int, void *, |
72 | xpc_notify_func, void *), | 72 | xpc_notify_func, void *), |
73 | void (*received) (partid_t, int, void *), | 73 | void (*received) (partid_t, int, void *), |
74 | enum xpc_retval (*partid_to_nasids) (partid_t, void *)) | 74 | enum xp_retval (*partid_to_nasids) (partid_t, void *)) |
75 | { | 75 | { |
76 | xpc_interface.connect = connect; | 76 | xpc_interface.connect = connect; |
77 | xpc_interface.disconnect = disconnect; | 77 | xpc_interface.disconnect = disconnect; |
@@ -91,16 +91,16 @@ xpc_clear_interface(void) | |||
91 | { | 91 | { |
92 | xpc_interface.connect = (void (*)(int))xpc_notloaded; | 92 | xpc_interface.connect = (void (*)(int))xpc_notloaded; |
93 | xpc_interface.disconnect = (void (*)(int))xpc_notloaded; | 93 | xpc_interface.disconnect = (void (*)(int))xpc_notloaded; |
94 | xpc_interface.allocate = (enum xpc_retval(*)(partid_t, int, u32, | 94 | xpc_interface.allocate = (enum xp_retval(*)(partid_t, int, u32, |
95 | void **))xpc_notloaded; | 95 | void **))xpc_notloaded; |
96 | xpc_interface.send = (enum xpc_retval(*)(partid_t, int, void *)) | 96 | xpc_interface.send = (enum xp_retval(*)(partid_t, int, void *)) |
97 | xpc_notloaded; | 97 | xpc_notloaded; |
98 | xpc_interface.send_notify = (enum xpc_retval(*)(partid_t, int, void *, | 98 | xpc_interface.send_notify = (enum xp_retval(*)(partid_t, int, void *, |
99 | xpc_notify_func, | 99 | xpc_notify_func, |
100 | void *))xpc_notloaded; | 100 | void *))xpc_notloaded; |
101 | xpc_interface.received = (void (*)(partid_t, int, void *)) | 101 | xpc_interface.received = (void (*)(partid_t, int, void *)) |
102 | xpc_notloaded; | 102 | xpc_notloaded; |
103 | xpc_interface.partid_to_nasids = (enum xpc_retval(*)(partid_t, void *)) | 103 | xpc_interface.partid_to_nasids = (enum xp_retval(*)(partid_t, void *)) |
104 | xpc_notloaded; | 104 | xpc_notloaded; |
105 | } | 105 | } |
106 | EXPORT_SYMBOL_GPL(xpc_clear_interface); | 106 | EXPORT_SYMBOL_GPL(xpc_clear_interface); |
@@ -123,13 +123,13 @@ EXPORT_SYMBOL_GPL(xpc_clear_interface); | |||
123 | * nentries - max #of XPC message entries a message queue can contain. | 123 | * nentries - max #of XPC message entries a message queue can contain. |
124 | * The actual number, which is determined when a connection | 124 | * The actual number, which is determined when a connection |
125 | * is established and may be less then requested, will be | 125 | * is established and may be less then requested, will be |
126 | * passed to the user via the xpcConnected callout. | 126 | * passed to the user via the xpConnected callout. |
127 | * assigned_limit - max number of kthreads allowed to be processing | 127 | * assigned_limit - max number of kthreads allowed to be processing |
128 | * messages (per connection) at any given instant. | 128 | * messages (per connection) at any given instant. |
129 | * idle_limit - max number of kthreads allowed to be idle at any given | 129 | * idle_limit - max number of kthreads allowed to be idle at any given |
130 | * instant. | 130 | * instant. |
131 | */ | 131 | */ |
132 | enum xpc_retval | 132 | enum xp_retval |
133 | xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, | 133 | xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, |
134 | u16 nentries, u32 assigned_limit, u32 idle_limit) | 134 | u16 nentries, u32 assigned_limit, u32 idle_limit) |
135 | { | 135 | { |
@@ -143,12 +143,12 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, | |||
143 | registration = &xpc_registrations[ch_number]; | 143 | registration = &xpc_registrations[ch_number]; |
144 | 144 | ||
145 | if (mutex_lock_interruptible(®istration->mutex) != 0) | 145 | if (mutex_lock_interruptible(®istration->mutex) != 0) |
146 | return xpcInterrupted; | 146 | return xpInterrupted; |
147 | 147 | ||
148 | /* if XPC_CHANNEL_REGISTERED(ch_number) */ | 148 | /* if XPC_CHANNEL_REGISTERED(ch_number) */ |
149 | if (registration->func != NULL) { | 149 | if (registration->func != NULL) { |
150 | mutex_unlock(®istration->mutex); | 150 | mutex_unlock(®istration->mutex); |
151 | return xpcAlreadyRegistered; | 151 | return xpAlreadyRegistered; |
152 | } | 152 | } |
153 | 153 | ||
154 | /* register the channel for connection */ | 154 | /* register the channel for connection */ |
@@ -163,7 +163,7 @@ xpc_connect(int ch_number, xpc_channel_func func, void *key, u16 payload_size, | |||
163 | 163 | ||
164 | xpc_interface.connect(ch_number); | 164 | xpc_interface.connect(ch_number); |
165 | 165 | ||
166 | return xpcSuccess; | 166 | return xpSuccess; |
167 | } | 167 | } |
168 | EXPORT_SYMBOL_GPL(xpc_connect); | 168 | EXPORT_SYMBOL_GPL(xpc_connect); |
169 | 169 | ||
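To illustrate the xpc_connect() interface documented in the hunk above with the new return values, a hedged registration sketch; the channel number, sizes, and limits are made-up values, and my_channel_callout is the hypothetical callout sketched earlier:

```c
/* Hypothetical registration sketch, not part of this patch. */
#define MY_CH_NUMBER	3	/* made-up channel number */

static int __init my_xp_consumer_init(void)
{
	enum xp_retval ret;	/* was: enum xpc_retval */

	ret = xpc_connect(MY_CH_NUMBER, my_channel_callout, NULL,
			  64,	/* payload_size */
			  16,	/* nentries; actual value arrives via xpConnected */
			  4,	/* assigned_limit */
			  1);	/* idle_limit */
	if (ret == xpAlreadyRegistered)
		return -EBUSY;
	if (ret != xpSuccess)
		return -EAGAIN;
	return 0;
}
```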
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 9eb6d4a3269c..67b179abf4a1 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -412,7 +412,7 @@ struct xpc_channel { | |||
412 | spinlock_t lock; /* lock for updating this structure */ | 412 | spinlock_t lock; /* lock for updating this structure */ |
413 | u32 flags; /* general flags */ | 413 | u32 flags; /* general flags */ |
414 | 414 | ||
415 | enum xpc_retval reason; /* reason why channel is disconnect'g */ | 415 | enum xp_retval reason; /* reason why channel is disconnect'g */ |
416 | int reason_line; /* line# disconnect initiated from */ | 416 | int reason_line; /* line# disconnect initiated from */ |
417 | 417 | ||
418 | u16 number; /* channel # */ | 418 | u16 number; /* channel # */ |
@@ -522,7 +522,7 @@ struct xpc_partition { | |||
522 | spinlock_t act_lock; /* protect updating of act_state */ | 522 | spinlock_t act_lock; /* protect updating of act_state */ |
523 | u8 act_state; /* from XPC HB viewpoint */ | 523 | u8 act_state; /* from XPC HB viewpoint */ |
524 | u8 remote_vars_version; /* version# of partition's vars */ | 524 | u8 remote_vars_version; /* version# of partition's vars */ |
525 | enum xpc_retval reason; /* reason partition is deactivating */ | 525 | enum xp_retval reason; /* reason partition is deactivating */ |
526 | int reason_line; /* line# deactivation initiated from */ | 526 | int reason_line; /* line# deactivation initiated from */ |
527 | int reactivate_nasid; /* nasid in partition to reactivate */ | 527 | int reactivate_nasid; /* nasid in partition to reactivate */ |
528 | 528 | ||
@@ -646,31 +646,31 @@ extern void xpc_allow_IPI_ops(void); | |||
646 | extern void xpc_restrict_IPI_ops(void); | 646 | extern void xpc_restrict_IPI_ops(void); |
647 | extern int xpc_identify_act_IRQ_sender(void); | 647 | extern int xpc_identify_act_IRQ_sender(void); |
648 | extern int xpc_partition_disengaged(struct xpc_partition *); | 648 | extern int xpc_partition_disengaged(struct xpc_partition *); |
649 | extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *); | 649 | extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *); |
650 | extern void xpc_mark_partition_inactive(struct xpc_partition *); | 650 | extern void xpc_mark_partition_inactive(struct xpc_partition *); |
651 | extern void xpc_discovery(void); | 651 | extern void xpc_discovery(void); |
652 | extern void xpc_check_remote_hb(void); | 652 | extern void xpc_check_remote_hb(void); |
653 | extern void xpc_deactivate_partition(const int, struct xpc_partition *, | 653 | extern void xpc_deactivate_partition(const int, struct xpc_partition *, |
654 | enum xpc_retval); | 654 | enum xp_retval); |
655 | extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *); | 655 | extern enum xp_retval xpc_initiate_partid_to_nasids(partid_t, void *); |
656 | 656 | ||
657 | /* found in xpc_channel.c */ | 657 | /* found in xpc_channel.c */ |
658 | extern void xpc_initiate_connect(int); | 658 | extern void xpc_initiate_connect(int); |
659 | extern void xpc_initiate_disconnect(int); | 659 | extern void xpc_initiate_disconnect(int); |
660 | extern enum xpc_retval xpc_initiate_allocate(partid_t, int, u32, void **); | 660 | extern enum xp_retval xpc_initiate_allocate(partid_t, int, u32, void **); |
661 | extern enum xpc_retval xpc_initiate_send(partid_t, int, void *); | 661 | extern enum xp_retval xpc_initiate_send(partid_t, int, void *); |
662 | extern enum xpc_retval xpc_initiate_send_notify(partid_t, int, void *, | 662 | extern enum xp_retval xpc_initiate_send_notify(partid_t, int, void *, |
663 | xpc_notify_func, void *); | 663 | xpc_notify_func, void *); |
664 | extern void xpc_initiate_received(partid_t, int, void *); | 664 | extern void xpc_initiate_received(partid_t, int, void *); |
665 | extern enum xpc_retval xpc_setup_infrastructure(struct xpc_partition *); | 665 | extern enum xp_retval xpc_setup_infrastructure(struct xpc_partition *); |
666 | extern enum xpc_retval xpc_pull_remote_vars_part(struct xpc_partition *); | 666 | extern enum xp_retval xpc_pull_remote_vars_part(struct xpc_partition *); |
667 | extern void xpc_process_channel_activity(struct xpc_partition *); | 667 | extern void xpc_process_channel_activity(struct xpc_partition *); |
668 | extern void xpc_connected_callout(struct xpc_channel *); | 668 | extern void xpc_connected_callout(struct xpc_channel *); |
669 | extern void xpc_deliver_msg(struct xpc_channel *); | 669 | extern void xpc_deliver_msg(struct xpc_channel *); |
670 | extern void xpc_disconnect_channel(const int, struct xpc_channel *, | 670 | extern void xpc_disconnect_channel(const int, struct xpc_channel *, |
671 | enum xpc_retval, unsigned long *); | 671 | enum xp_retval, unsigned long *); |
672 | extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval); | 672 | extern void xpc_disconnect_callout(struct xpc_channel *, enum xp_retval); |
673 | extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval); | 673 | extern void xpc_partition_going_down(struct xpc_partition *, enum xp_retval); |
674 | extern void xpc_teardown_infrastructure(struct xpc_partition *); | 674 | extern void xpc_teardown_infrastructure(struct xpc_partition *); |
675 | 675 | ||
676 | static inline void | 676 | static inline void |
@@ -901,7 +901,7 @@ xpc_IPI_receive(AMO_t *amo) | |||
901 | return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR); | 901 | return FETCHOP_LOAD_OP(TO_AMO((u64)&amo->variable), FETCHOP_CLEAR); |
902 | } | 902 | } |
903 | 903 | ||
904 | static inline enum xpc_retval | 904 | static inline enum xp_retval |
905 | xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) | 905 | xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) |
906 | { | 906 | { |
907 | int ret = 0; | 907 | int ret = 0; |
@@ -923,7 +923,7 @@ xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector) | |||
923 | 923 | ||
924 | local_irq_restore(irq_flags); | 924 | local_irq_restore(irq_flags); |
925 | 925 | ||
926 | return ((ret == 0) ? xpcSuccess : xpcPioReadError); | 926 | return ((ret == 0) ? xpSuccess : xpPioReadError); |
927 | } | 927 | } |
928 | 928 | ||
929 | /* | 929 | /* |
@@ -992,7 +992,7 @@ xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string, | |||
992 | unsigned long *irq_flags) | 992 | unsigned long *irq_flags) |
993 | { | 993 | { |
994 | struct xpc_partition *part = &xpc_partitions[ch->partid]; | 994 | struct xpc_partition *part = &xpc_partitions[ch->partid]; |
995 | enum xpc_retval ret; | 995 | enum xp_retval ret; |
996 | 996 | ||
997 | if (likely(part->act_state != XPC_P_DEACTIVATING)) { | 997 | if (likely(part->act_state != XPC_P_DEACTIVATING)) { |
998 | ret = xpc_IPI_send(part->remote_IPI_amo_va, | 998 | ret = xpc_IPI_send(part->remote_IPI_amo_va, |
@@ -1001,7 +1001,7 @@ xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string, | |||
1001 | part->remote_IPI_phys_cpuid, SGI_XPC_NOTIFY); | 1001 | part->remote_IPI_phys_cpuid, SGI_XPC_NOTIFY); |
1002 | dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n", | 1002 | dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n", |
1003 | ipi_flag_string, ch->partid, ch->number, ret); | 1003 | ipi_flag_string, ch->partid, ch->number, ret); |
1004 | if (unlikely(ret != xpcSuccess)) { | 1004 | if (unlikely(ret != xpSuccess)) { |
1005 | if (irq_flags != NULL) | 1005 | if (irq_flags != NULL) |
1006 | spin_unlock_irqrestore(&ch->lock, *irq_flags); | 1006 | spin_unlock_irqrestore(&ch->lock, *irq_flags); |
1007 | XPC_DEACTIVATE_PARTITION(part, ret); | 1007 | XPC_DEACTIVATE_PARTITION(part, ret); |
@@ -1123,41 +1123,10 @@ xpc_IPI_init(int index) | |||
1123 | return amo; | 1123 | return amo; |
1124 | } | 1124 | } |
1125 | 1125 | ||
1126 | static inline enum xpc_retval | 1126 | static inline enum xp_retval |
1127 | xpc_map_bte_errors(bte_result_t error) | 1127 | xpc_map_bte_errors(bte_result_t error) |
1128 | { | 1128 | { |
1129 | if (error == BTE_SUCCESS) | 1129 | return ((error == BTE_SUCCESS) ? xpSuccess : xpBteCopyError); |
1130 | return xpcSuccess; | ||
1131 | |||
1132 | if (is_shub2()) { | ||
1133 | if (BTE_VALID_SH2_ERROR(error)) | ||
1134 | return xpcBteSh2Start + error; | ||
1135 | return xpcBteUnmappedError; | ||
1136 | } | ||
1137 | switch (error) { | ||
1138 | case BTE_SUCCESS: | ||
1139 | return xpcSuccess; | ||
1140 | case BTEFAIL_DIR: | ||
1141 | return xpcBteDirectoryError; | ||
1142 | case BTEFAIL_POISON: | ||
1143 | return xpcBtePoisonError; | ||
1144 | case BTEFAIL_WERR: | ||
1145 | return xpcBteWriteError; | ||
1146 | case BTEFAIL_ACCESS: | ||
1147 | return xpcBteAccessError; | ||
1148 | case BTEFAIL_PWERR: | ||
1149 | return xpcBtePWriteError; | ||
1150 | case BTEFAIL_PRERR: | ||
1151 | return xpcBtePReadError; | ||
1152 | case BTEFAIL_TOUT: | ||
1153 | return xpcBteTimeOutError; | ||
1154 | case BTEFAIL_XTERR: | ||
1155 | return xpcBteXtalkError; | ||
1156 | case BTEFAIL_NOTAVAIL: | ||
1157 | return xpcBteNotAvailable; | ||
1158 | default: | ||
1159 | return xpcBteUnmappedError; | ||
1160 | } | ||
1161 | } | 1130 | } |
1162 | 1131 | ||
1163 | /* | 1132 | /* |
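With the per-failure BTE codes retired, xpc_map_bte_errors() collapses to the single check shown above, and every bte_copy() failure surfaces to XP callers as xpBteCopyError. A hedged sketch of how a pull routine reports errors under the new scheme; it mirrors xpc_pull_remote_cachelines() in the next file, but the function itself is illustrative:

```c
/* Illustrative wrapper, not part of this patch; uses xp_bte_copy() and the
 * BTE_* flags already present in the driver. */
static enum xp_retval
my_pull_cachelines(u64 src_pa, void *dst, size_t cnt)
{
	bte_result_t bte_ret;

	bte_ret = xp_bte_copy(src_pa, (u64)dst, (u64)cnt,
			      (BTE_NORMAL | BTE_WACQUIRE), NULL);
	if (bte_ret == BTE_SUCCESS)
		return xpSuccess;

	/* the specific BTE failure is only visible here; XP callers now see
	 * just the generic xpBteCopyError instead of a per-failure code */
	printk(KERN_DEBUG "xp_bte_copy() failed, bte_ret=%d\n", bte_ret);
	return xpBteCopyError;
}
```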
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index bfcb9ea968e9..74ec506755a3 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -90,7 +90,7 @@ xpc_initialize_channels(struct xpc_partition *part, partid_t partid) | |||
90 | * Setup the infrastructure necessary to support XPartition Communication | 90 | * Setup the infrastructure necessary to support XPartition Communication |
91 | * between the specified remote partition and the local one. | 91 | * between the specified remote partition and the local one. |
92 | */ | 92 | */ |
93 | enum xpc_retval | 93 | enum xp_retval |
94 | xpc_setup_infrastructure(struct xpc_partition *part) | 94 | xpc_setup_infrastructure(struct xpc_partition *part) |
95 | { | 95 | { |
96 | int ret, cpuid; | 96 | int ret, cpuid; |
@@ -114,7 +114,7 @@ xpc_setup_infrastructure(struct xpc_partition *part) | |||
114 | GFP_KERNEL); | 114 | GFP_KERNEL); |
115 | if (part->channels == NULL) { | 115 | if (part->channels == NULL) { |
116 | dev_err(xpc_chan, "can't get memory for channels\n"); | 116 | dev_err(xpc_chan, "can't get memory for channels\n"); |
117 | return xpcNoMemory; | 117 | return xpNoMemory; |
118 | } | 118 | } |
119 | 119 | ||
120 | part->nchannels = XPC_NCHANNELS; | 120 | part->nchannels = XPC_NCHANNELS; |
@@ -129,7 +129,7 @@ xpc_setup_infrastructure(struct xpc_partition *part) | |||
129 | part->channels = NULL; | 129 | part->channels = NULL; |
130 | dev_err(xpc_chan, "can't get memory for local get/put " | 130 | dev_err(xpc_chan, "can't get memory for local get/put " |
131 | "values\n"); | 131 | "values\n"); |
132 | return xpcNoMemory; | 132 | return xpNoMemory; |
133 | } | 133 | } |
134 | 134 | ||
135 | part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, | 135 | part->remote_GPs = xpc_kzalloc_cacheline_aligned(XPC_GP_SIZE, |
@@ -143,7 +143,7 @@ xpc_setup_infrastructure(struct xpc_partition *part) | |||
143 | part->local_GPs = NULL; | 143 | part->local_GPs = NULL; |
144 | kfree(part->channels); | 144 | kfree(part->channels); |
145 | part->channels = NULL; | 145 | part->channels = NULL; |
146 | return xpcNoMemory; | 146 | return xpNoMemory; |
147 | } | 147 | } |
148 | 148 | ||
149 | /* allocate all the required open and close args */ | 149 | /* allocate all the required open and close args */ |
@@ -159,7 +159,7 @@ xpc_setup_infrastructure(struct xpc_partition *part) | |||
159 | part->local_GPs = NULL; | 159 | part->local_GPs = NULL; |
160 | kfree(part->channels); | 160 | kfree(part->channels); |
161 | part->channels = NULL; | 161 | part->channels = NULL; |
162 | return xpcNoMemory; | 162 | return xpNoMemory; |
163 | } | 163 | } |
164 | 164 | ||
165 | part->remote_openclose_args = | 165 | part->remote_openclose_args = |
@@ -175,7 +175,7 @@ xpc_setup_infrastructure(struct xpc_partition *part) | |||
175 | part->local_GPs = NULL; | 175 | part->local_GPs = NULL; |
176 | kfree(part->channels); | 176 | kfree(part->channels); |
177 | part->channels = NULL; | 177 | part->channels = NULL; |
178 | return xpcNoMemory; | 178 | return xpNoMemory; |
179 | } | 179 | } |
180 | 180 | ||
181 | xpc_initialize_channels(part, partid); | 181 | xpc_initialize_channels(part, partid); |
@@ -209,7 +209,7 @@ xpc_setup_infrastructure(struct xpc_partition *part) | |||
209 | part->local_GPs = NULL; | 209 | part->local_GPs = NULL; |
210 | kfree(part->channels); | 210 | kfree(part->channels); |
211 | part->channels = NULL; | 211 | part->channels = NULL; |
212 | return xpcLackOfResources; | 212 | return xpLackOfResources; |
213 | } | 213 | } |
214 | 214 | ||
215 | /* Setup a timer to check for dropped IPIs */ | 215 | /* Setup a timer to check for dropped IPIs */ |
@@ -243,7 +243,7 @@ xpc_setup_infrastructure(struct xpc_partition *part) | |||
243 | xpc_vars_part[partid].nchannels = part->nchannels; | 243 | xpc_vars_part[partid].nchannels = part->nchannels; |
244 | xpc_vars_part[partid].magic = XPC_VP_MAGIC1; | 244 | xpc_vars_part[partid].magic = XPC_VP_MAGIC1; |
245 | 245 | ||
246 | return xpcSuccess; | 246 | return xpSuccess; |
247 | } | 247 | } |
248 | 248 | ||
249 | /* | 249 | /* |
@@ -254,7 +254,7 @@ xpc_setup_infrastructure(struct xpc_partition *part) | |||
254 | * dst must be a cacheline aligned virtual address on this partition. | 254 | * dst must be a cacheline aligned virtual address on this partition. |
255 | * cnt must be an cacheline sized | 255 | * cnt must be an cacheline sized |
256 | */ | 256 | */ |
257 | static enum xpc_retval | 257 | static enum xp_retval |
258 | xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst, | 258 | xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst, |
259 | const void *src, size_t cnt) | 259 | const void *src, size_t cnt) |
260 | { | 260 | { |
@@ -270,7 +270,7 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst, | |||
270 | bte_ret = xp_bte_copy((u64)src, (u64)dst, (u64)cnt, | 270 | bte_ret = xp_bte_copy((u64)src, (u64)dst, (u64)cnt, |
271 | (BTE_NORMAL | BTE_WACQUIRE), NULL); | 271 | (BTE_NORMAL | BTE_WACQUIRE), NULL); |
272 | if (bte_ret == BTE_SUCCESS) | 272 | if (bte_ret == BTE_SUCCESS) |
273 | return xpcSuccess; | 273 | return xpSuccess; |
274 | 274 | ||
275 | dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n", | 275 | dev_dbg(xpc_chan, "xp_bte_copy() from partition %d failed, ret=%d\n", |
276 | XPC_PARTID(part), bte_ret); | 276 | XPC_PARTID(part), bte_ret); |
@@ -282,7 +282,7 @@ xpc_pull_remote_cachelines(struct xpc_partition *part, void *dst, | |||
282 | * Pull the remote per partition specific variables from the specified | 282 | * Pull the remote per partition specific variables from the specified |
283 | * partition. | 283 | * partition. |
284 | */ | 284 | */ |
285 | enum xpc_retval | 285 | enum xp_retval |
286 | xpc_pull_remote_vars_part(struct xpc_partition *part) | 286 | xpc_pull_remote_vars_part(struct xpc_partition *part) |
287 | { | 287 | { |
288 | u8 buffer[L1_CACHE_BYTES * 2]; | 288 | u8 buffer[L1_CACHE_BYTES * 2]; |
@@ -291,7 +291,7 @@ xpc_pull_remote_vars_part(struct xpc_partition *part) | |||
291 | struct xpc_vars_part *pulled_entry; | 291 | struct xpc_vars_part *pulled_entry; |
292 | u64 remote_entry_cacheline_pa, remote_entry_pa; | 292 | u64 remote_entry_cacheline_pa, remote_entry_pa; |
293 | partid_t partid = XPC_PARTID(part); | 293 | partid_t partid = XPC_PARTID(part); |
294 | enum xpc_retval ret; | 294 | enum xp_retval ret; |
295 | 295 | ||
296 | /* pull the cacheline that contains the variables we're interested in */ | 296 | /* pull the cacheline that contains the variables we're interested in */ |
297 | 297 | ||
@@ -311,7 +311,7 @@ xpc_pull_remote_vars_part(struct xpc_partition *part) | |||
311 | ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline, | 311 | ret = xpc_pull_remote_cachelines(part, pulled_entry_cacheline, |
312 | (void *)remote_entry_cacheline_pa, | 312 | (void *)remote_entry_cacheline_pa, |
313 | L1_CACHE_BYTES); | 313 | L1_CACHE_BYTES); |
314 | if (ret != xpcSuccess) { | 314 | if (ret != xpSuccess) { |
315 | dev_dbg(xpc_chan, "failed to pull XPC vars_part from " | 315 | dev_dbg(xpc_chan, "failed to pull XPC vars_part from " |
316 | "partition %d, ret=%d\n", partid, ret); | 316 | "partition %d, ret=%d\n", partid, ret); |
317 | return ret; | 317 | return ret; |
@@ -326,11 +326,11 @@ xpc_pull_remote_vars_part(struct xpc_partition *part) | |||
326 | dev_dbg(xpc_chan, "partition %d's XPC vars_part for " | 326 | dev_dbg(xpc_chan, "partition %d's XPC vars_part for " |
327 | "partition %d has bad magic value (=0x%lx)\n", | 327 | "partition %d has bad magic value (=0x%lx)\n", |
328 | partid, sn_partition_id, pulled_entry->magic); | 328 | partid, sn_partition_id, pulled_entry->magic); |
329 | return xpcBadMagic; | 329 | return xpBadMagic; |
330 | } | 330 | } |
331 | 331 | ||
332 | /* they've not been initialized yet */ | 332 | /* they've not been initialized yet */ |
333 | return xpcRetry; | 333 | return xpRetry; |
334 | } | 334 | } |
335 | 335 | ||
336 | if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) { | 336 | if (xpc_vars_part[partid].magic == XPC_VP_MAGIC1) { |
@@ -344,7 +344,7 @@ xpc_pull_remote_vars_part(struct xpc_partition *part) | |||
344 | dev_err(xpc_chan, "partition %d's XPC vars_part for " | 344 | dev_err(xpc_chan, "partition %d's XPC vars_part for " |
345 | "partition %d are not valid\n", partid, | 345 | "partition %d are not valid\n", partid, |
346 | sn_partition_id); | 346 | sn_partition_id); |
347 | return xpcInvalidAddress; | 347 | return xpInvalidAddress; |
348 | } | 348 | } |
349 | 349 | ||
350 | /* the variables we imported look to be valid */ | 350 | /* the variables we imported look to be valid */ |
@@ -366,9 +366,9 @@ xpc_pull_remote_vars_part(struct xpc_partition *part) | |||
366 | } | 366 | } |
367 | 367 | ||
368 | if (pulled_entry->magic == XPC_VP_MAGIC1) | 368 | if (pulled_entry->magic == XPC_VP_MAGIC1) |
369 | return xpcRetry; | 369 | return xpRetry; |
370 | 370 | ||
371 | return xpcSuccess; | 371 | return xpSuccess; |
372 | } | 372 | } |
373 | 373 | ||
374 | /* | 374 | /* |
@@ -379,7 +379,7 @@ xpc_get_IPI_flags(struct xpc_partition *part) | |||
379 | { | 379 | { |
380 | unsigned long irq_flags; | 380 | unsigned long irq_flags; |
381 | u64 IPI_amo; | 381 | u64 IPI_amo; |
382 | enum xpc_retval ret; | 382 | enum xp_retval ret; |
383 | 383 | ||
384 | /* | 384 | /* |
385 | * See if there are any IPI flags to be handled. | 385 | * See if there are any IPI flags to be handled. |
@@ -398,7 +398,7 @@ xpc_get_IPI_flags(struct xpc_partition *part) | |||
398 | (void *)part-> | 398 | (void *)part-> |
399 | remote_openclose_args_pa, | 399 | remote_openclose_args_pa, |
400 | XPC_OPENCLOSE_ARGS_SIZE); | 400 | XPC_OPENCLOSE_ARGS_SIZE); |
401 | if (ret != xpcSuccess) { | 401 | if (ret != xpSuccess) { |
402 | XPC_DEACTIVATE_PARTITION(part, ret); | 402 | XPC_DEACTIVATE_PARTITION(part, ret); |
403 | 403 | ||
404 | dev_dbg(xpc_chan, "failed to pull openclose args from " | 404 | dev_dbg(xpc_chan, "failed to pull openclose args from " |
@@ -414,7 +414,7 @@ xpc_get_IPI_flags(struct xpc_partition *part) | |||
414 | ret = xpc_pull_remote_cachelines(part, part->remote_GPs, | 414 | ret = xpc_pull_remote_cachelines(part, part->remote_GPs, |
415 | (void *)part->remote_GPs_pa, | 415 | (void *)part->remote_GPs_pa, |
416 | XPC_GP_SIZE); | 416 | XPC_GP_SIZE); |
417 | if (ret != xpcSuccess) { | 417 | if (ret != xpSuccess) { |
418 | XPC_DEACTIVATE_PARTITION(part, ret); | 418 | XPC_DEACTIVATE_PARTITION(part, ret); |
419 | 419 | ||
420 | dev_dbg(xpc_chan, "failed to pull GPs from partition " | 420 | dev_dbg(xpc_chan, "failed to pull GPs from partition " |
@@ -431,7 +431,7 @@ xpc_get_IPI_flags(struct xpc_partition *part) | |||
431 | /* | 431 | /* |
432 | * Allocate the local message queue and the notify queue. | 432 | * Allocate the local message queue and the notify queue. |
433 | */ | 433 | */ |
434 | static enum xpc_retval | 434 | static enum xp_retval |
435 | xpc_allocate_local_msgqueue(struct xpc_channel *ch) | 435 | xpc_allocate_local_msgqueue(struct xpc_channel *ch) |
436 | { | 436 | { |
437 | unsigned long irq_flags; | 437 | unsigned long irq_flags; |
@@ -464,18 +464,18 @@ xpc_allocate_local_msgqueue(struct xpc_channel *ch) | |||
464 | ch->local_nentries = nentries; | 464 | ch->local_nentries = nentries; |
465 | } | 465 | } |
466 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 466 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
467 | return xpcSuccess; | 467 | return xpSuccess; |
468 | } | 468 | } |
469 | 469 | ||
470 | dev_dbg(xpc_chan, "can't get memory for local message queue and notify " | 470 | dev_dbg(xpc_chan, "can't get memory for local message queue and notify " |
471 | "queue, partid=%d, channel=%d\n", ch->partid, ch->number); | 471 | "queue, partid=%d, channel=%d\n", ch->partid, ch->number); |
472 | return xpcNoMemory; | 472 | return xpNoMemory; |
473 | } | 473 | } |
474 | 474 | ||
475 | /* | 475 | /* |
476 | * Allocate the cached remote message queue. | 476 | * Allocate the cached remote message queue. |
477 | */ | 477 | */ |
478 | static enum xpc_retval | 478 | static enum xp_retval |
479 | xpc_allocate_remote_msgqueue(struct xpc_channel *ch) | 479 | xpc_allocate_remote_msgqueue(struct xpc_channel *ch) |
480 | { | 480 | { |
481 | unsigned long irq_flags; | 481 | unsigned long irq_flags; |
@@ -502,12 +502,12 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch) | |||
502 | ch->remote_nentries = nentries; | 502 | ch->remote_nentries = nentries; |
503 | } | 503 | } |
504 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 504 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
505 | return xpcSuccess; | 505 | return xpSuccess; |
506 | } | 506 | } |
507 | 507 | ||
508 | dev_dbg(xpc_chan, "can't get memory for cached remote message queue, " | 508 | dev_dbg(xpc_chan, "can't get memory for cached remote message queue, " |
509 | "partid=%d, channel=%d\n", ch->partid, ch->number); | 509 | "partid=%d, channel=%d\n", ch->partid, ch->number); |
510 | return xpcNoMemory; | 510 | return xpNoMemory; |
511 | } | 511 | } |
512 | 512 | ||
513 | /* | 513 | /* |
@@ -515,20 +515,20 @@ xpc_allocate_remote_msgqueue(struct xpc_channel *ch) | |||
515 | * | 515 | * |
516 | * Note: Assumes all of the channel sizes are filled in. | 516 | * Note: Assumes all of the channel sizes are filled in. |
517 | */ | 517 | */ |
518 | static enum xpc_retval | 518 | static enum xp_retval |
519 | xpc_allocate_msgqueues(struct xpc_channel *ch) | 519 | xpc_allocate_msgqueues(struct xpc_channel *ch) |
520 | { | 520 | { |
521 | unsigned long irq_flags; | 521 | unsigned long irq_flags; |
522 | enum xpc_retval ret; | 522 | enum xp_retval ret; |
523 | 523 | ||
524 | DBUG_ON(ch->flags & XPC_C_SETUP); | 524 | DBUG_ON(ch->flags & XPC_C_SETUP); |
525 | 525 | ||
526 | ret = xpc_allocate_local_msgqueue(ch); | 526 | ret = xpc_allocate_local_msgqueue(ch); |
527 | if (ret != xpcSuccess) | 527 | if (ret != xpSuccess) |
528 | return ret; | 528 | return ret; |
529 | 529 | ||
530 | ret = xpc_allocate_remote_msgqueue(ch); | 530 | ret = xpc_allocate_remote_msgqueue(ch); |
531 | if (ret != xpcSuccess) { | 531 | if (ret != xpSuccess) { |
532 | kfree(ch->local_msgqueue_base); | 532 | kfree(ch->local_msgqueue_base); |
533 | ch->local_msgqueue = NULL; | 533 | ch->local_msgqueue = NULL; |
534 | kfree(ch->notify_queue); | 534 | kfree(ch->notify_queue); |
@@ -540,7 +540,7 @@ xpc_allocate_msgqueues(struct xpc_channel *ch) | |||
540 | ch->flags |= XPC_C_SETUP; | 540 | ch->flags |= XPC_C_SETUP; |
541 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 541 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
542 | 542 | ||
543 | return xpcSuccess; | 543 | return xpSuccess; |
544 | } | 544 | } |
545 | 545 | ||
546 | /* | 546 | /* |
@@ -552,7 +552,7 @@ xpc_allocate_msgqueues(struct xpc_channel *ch) | |||
552 | static void | 552 | static void |
553 | xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) | 553 | xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) |
554 | { | 554 | { |
555 | enum xpc_retval ret; | 555 | enum xp_retval ret; |
556 | 556 | ||
557 | DBUG_ON(!spin_is_locked(&ch->lock)); | 557 | DBUG_ON(!spin_is_locked(&ch->lock)); |
558 | 558 | ||
@@ -568,7 +568,7 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) | |||
568 | ret = xpc_allocate_msgqueues(ch); | 568 | ret = xpc_allocate_msgqueues(ch); |
569 | spin_lock_irqsave(&ch->lock, *irq_flags); | 569 | spin_lock_irqsave(&ch->lock, *irq_flags); |
570 | 570 | ||
571 | if (ret != xpcSuccess) | 571 | if (ret != xpSuccess) |
572 | XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); | 572 | XPC_DISCONNECT_CHANNEL(ch, ret, irq_flags); |
573 | 573 | ||
574 | if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) | 574 | if (ch->flags & (XPC_C_CONNECTED | XPC_C_DISCONNECTING)) |
@@ -603,7 +603,7 @@ xpc_process_connect(struct xpc_channel *ch, unsigned long *irq_flags) | |||
603 | * Notify those who wanted to be notified upon delivery of their message. | 603 | * Notify those who wanted to be notified upon delivery of their message. |
604 | */ | 604 | */ |
605 | static void | 605 | static void |
606 | xpc_notify_senders(struct xpc_channel *ch, enum xpc_retval reason, s64 put) | 606 | xpc_notify_senders(struct xpc_channel *ch, enum xp_retval reason, s64 put) |
607 | { | 607 | { |
608 | struct xpc_notify *notify; | 608 | struct xpc_notify *notify; |
609 | u8 notify_type; | 609 | u8 notify_type; |
@@ -748,7 +748,7 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags) | |||
748 | 748 | ||
749 | if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) { | 749 | if (ch->flags & XPC_C_DISCONNECTINGCALLOUT_MADE) { |
750 | spin_unlock_irqrestore(&ch->lock, *irq_flags); | 750 | spin_unlock_irqrestore(&ch->lock, *irq_flags); |
751 | xpc_disconnect_callout(ch, xpcDisconnected); | 751 | xpc_disconnect_callout(ch, xpDisconnected); |
752 | spin_lock_irqsave(&ch->lock, *irq_flags); | 752 | spin_lock_irqsave(&ch->lock, *irq_flags); |
753 | } | 753 | } |
754 | 754 | ||
@@ -791,7 +791,7 @@ xpc_process_openclose_IPI(struct xpc_partition *part, int ch_number, | |||
791 | struct xpc_openclose_args *args = | 791 | struct xpc_openclose_args *args = |
792 | &part->remote_openclose_args[ch_number]; | 792 | &part->remote_openclose_args[ch_number]; |
793 | struct xpc_channel *ch = &part->channels[ch_number]; | 793 | struct xpc_channel *ch = &part->channels[ch_number]; |
794 | enum xpc_retval reason; | 794 | enum xp_retval reason; |
795 | 795 | ||
796 | spin_lock_irqsave(&ch->lock, irq_flags); | 796 | spin_lock_irqsave(&ch->lock, irq_flags); |
797 | 797 | ||
@@ -871,10 +871,10 @@ again: | |||
871 | 871 | ||
872 | if (!(ch->flags & XPC_C_DISCONNECTING)) { | 872 | if (!(ch->flags & XPC_C_DISCONNECTING)) { |
873 | reason = args->reason; | 873 | reason = args->reason; |
874 | if (reason <= xpcSuccess || reason > xpcUnknownReason) | 874 | if (reason <= xpSuccess || reason > xpUnknownReason) |
875 | reason = xpcUnknownReason; | 875 | reason = xpUnknownReason; |
876 | else if (reason == xpcUnregistering) | 876 | else if (reason == xpUnregistering) |
877 | reason = xpcOtherUnregistering; | 877 | reason = xpOtherUnregistering; |
878 | 878 | ||
879 | XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); | 879 | XPC_DISCONNECT_CHANNEL(ch, reason, &irq_flags); |
880 | 880 | ||
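The hunk above clamps a reason code that arrived from the remote partition before handing it to XPC_DISCONNECT_CHANNEL(). Below is a minimal userspace sketch of that sanitizing step using the renamed codes; the enum is a trimmed stand-in (its numeric values are illustrative only, not the ones defined in xp.h) and sanitize_reason() is a hypothetical helper, not a function from the driver.

#include <stdio.h>

/* Trimmed stand-in for enum xp_retval; numeric values illustrative only. */
enum xp_retval {
	xpSuccess = 0,
	xpUnregistering = 22,
	xpOtherUnregistering = 23,
	xpUnknownReason = 60	/* assumed to be the highest-numbered code */
};

/*
 * Mirror the check performed before XPC_DISCONNECT_CHANNEL(): a reason
 * outside the known range collapses to xpUnknownReason, and a remote
 * xpUnregistering is reported locally as xpOtherUnregistering.
 */
static enum xp_retval sanitize_reason(int reason)
{
	if (reason <= xpSuccess || reason > xpUnknownReason)
		return xpUnknownReason;
	if (reason == xpUnregistering)
		return xpOtherUnregistering;
	return (enum xp_retval)reason;
}

int main(void)
{
	printf("%d\n", sanitize_reason(99));              /* -> xpUnknownReason */
	printf("%d\n", sanitize_reason(xpUnregistering)); /* -> xpOtherUnregistering */
	return 0;
}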
@@ -961,7 +961,7 @@ again: | |||
961 | 961 | ||
962 | if (ch->flags & XPC_C_OPENREQUEST) { | 962 | if (ch->flags & XPC_C_OPENREQUEST) { |
963 | if (args->msg_size != ch->msg_size) { | 963 | if (args->msg_size != ch->msg_size) { |
964 | XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, | 964 | XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes, |
965 | &irq_flags); | 965 | &irq_flags); |
966 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 966 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
967 | return; | 967 | return; |
@@ -991,7 +991,7 @@ again: | |||
991 | return; | 991 | return; |
992 | } | 992 | } |
993 | if (!(ch->flags & XPC_C_OPENREQUEST)) { | 993 | if (!(ch->flags & XPC_C_OPENREQUEST)) { |
994 | XPC_DISCONNECT_CHANNEL(ch, xpcOpenCloseError, | 994 | XPC_DISCONNECT_CHANNEL(ch, xpOpenCloseError, |
995 | &irq_flags); | 995 | &irq_flags); |
996 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 996 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
997 | return; | 997 | return; |
@@ -1042,18 +1042,18 @@ again: | |||
1042 | /* | 1042 | /* |
1043 | * Attempt to establish a channel connection to a remote partition. | 1043 | * Attempt to establish a channel connection to a remote partition. |
1044 | */ | 1044 | */ |
1045 | static enum xpc_retval | 1045 | static enum xp_retval |
1046 | xpc_connect_channel(struct xpc_channel *ch) | 1046 | xpc_connect_channel(struct xpc_channel *ch) |
1047 | { | 1047 | { |
1048 | unsigned long irq_flags; | 1048 | unsigned long irq_flags; |
1049 | struct xpc_registration *registration = &xpc_registrations[ch->number]; | 1049 | struct xpc_registration *registration = &xpc_registrations[ch->number]; |
1050 | 1050 | ||
1051 | if (mutex_trylock(®istration->mutex) == 0) | 1051 | if (mutex_trylock(®istration->mutex) == 0) |
1052 | return xpcRetry; | 1052 | return xpRetry; |
1053 | 1053 | ||
1054 | if (!XPC_CHANNEL_REGISTERED(ch->number)) { | 1054 | if (!XPC_CHANNEL_REGISTERED(ch->number)) { |
1055 | mutex_unlock(®istration->mutex); | 1055 | mutex_unlock(®istration->mutex); |
1056 | return xpcUnregistered; | 1056 | return xpUnregistered; |
1057 | } | 1057 | } |
1058 | 1058 | ||
1059 | spin_lock_irqsave(&ch->lock, irq_flags); | 1059 | spin_lock_irqsave(&ch->lock, irq_flags); |
@@ -1095,10 +1095,10 @@ xpc_connect_channel(struct xpc_channel *ch) | |||
1095 | * the channel lock as needed. | 1095 | * the channel lock as needed. |
1096 | */ | 1096 | */ |
1097 | mutex_unlock(®istration->mutex); | 1097 | mutex_unlock(®istration->mutex); |
1098 | XPC_DISCONNECT_CHANNEL(ch, xpcUnequalMsgSizes, | 1098 | XPC_DISCONNECT_CHANNEL(ch, xpUnequalMsgSizes, |
1099 | &irq_flags); | 1099 | &irq_flags); |
1100 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 1100 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
1101 | return xpcUnequalMsgSizes; | 1101 | return xpUnequalMsgSizes; |
1102 | } | 1102 | } |
1103 | } else { | 1103 | } else { |
1104 | ch->msg_size = registration->msg_size; | 1104 | ch->msg_size = registration->msg_size; |
@@ -1120,7 +1120,7 @@ xpc_connect_channel(struct xpc_channel *ch) | |||
1120 | 1120 | ||
1121 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 1121 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
1122 | 1122 | ||
1123 | return xpcSuccess; | 1123 | return xpSuccess; |
1124 | } | 1124 | } |
1125 | 1125 | ||
1126 | /* | 1126 | /* |
@@ -1203,7 +1203,7 @@ xpc_process_msg_IPI(struct xpc_partition *part, int ch_number) | |||
1203 | * Notify senders that messages sent have been | 1203 | * Notify senders that messages sent have been |
1204 | * received and delivered by the other side. | 1204 | * received and delivered by the other side. |
1205 | */ | 1205 | */ |
1206 | xpc_notify_senders(ch, xpcMsgDelivered, | 1206 | xpc_notify_senders(ch, xpMsgDelivered, |
1207 | ch->remote_GP.get); | 1207 | ch->remote_GP.get); |
1208 | } | 1208 | } |
1209 | 1209 | ||
@@ -1335,7 +1335,7 @@ xpc_process_channel_activity(struct xpc_partition *part) | |||
1335 | * at the same time. | 1335 | * at the same time. |
1336 | */ | 1336 | */ |
1337 | void | 1337 | void |
1338 | xpc_partition_going_down(struct xpc_partition *part, enum xpc_retval reason) | 1338 | xpc_partition_going_down(struct xpc_partition *part, enum xp_retval reason) |
1339 | { | 1339 | { |
1340 | unsigned long irq_flags; | 1340 | unsigned long irq_flags; |
1341 | int ch_number; | 1341 | int ch_number; |
@@ -1456,13 +1456,13 @@ xpc_connected_callout(struct xpc_channel *ch) | |||
1456 | /* let the registerer know that a connection has been established */ | 1456 | /* let the registerer know that a connection has been established */ |
1457 | 1457 | ||
1458 | if (ch->func != NULL) { | 1458 | if (ch->func != NULL) { |
1459 | dev_dbg(xpc_chan, "ch->func() called, reason=xpcConnected, " | 1459 | dev_dbg(xpc_chan, "ch->func() called, reason=xpConnected, " |
1460 | "partid=%d, channel=%d\n", ch->partid, ch->number); | 1460 | "partid=%d, channel=%d\n", ch->partid, ch->number); |
1461 | 1461 | ||
1462 | ch->func(xpcConnected, ch->partid, ch->number, | 1462 | ch->func(xpConnected, ch->partid, ch->number, |
1463 | (void *)(u64)ch->local_nentries, ch->key); | 1463 | (void *)(u64)ch->local_nentries, ch->key); |
1464 | 1464 | ||
1465 | dev_dbg(xpc_chan, "ch->func() returned, reason=xpcConnected, " | 1465 | dev_dbg(xpc_chan, "ch->func() returned, reason=xpConnected, " |
1466 | "partid=%d, channel=%d\n", ch->partid, ch->number); | 1466 | "partid=%d, channel=%d\n", ch->partid, ch->number); |
1467 | } | 1467 | } |
1468 | } | 1468 | } |
@@ -1503,7 +1503,7 @@ xpc_initiate_disconnect(int ch_number) | |||
1503 | if (!(ch->flags & XPC_C_DISCONNECTED)) { | 1503 | if (!(ch->flags & XPC_C_DISCONNECTED)) { |
1504 | ch->flags |= XPC_C_WDISCONNECT; | 1504 | ch->flags |= XPC_C_WDISCONNECT; |
1505 | 1505 | ||
1506 | XPC_DISCONNECT_CHANNEL(ch, xpcUnregistering, | 1506 | XPC_DISCONNECT_CHANNEL(ch, xpUnregistering, |
1507 | &irq_flags); | 1507 | &irq_flags); |
1508 | } | 1508 | } |
1509 | 1509 | ||
@@ -1528,7 +1528,7 @@ xpc_initiate_disconnect(int ch_number) | |||
1528 | */ | 1528 | */ |
1529 | void | 1529 | void |
1530 | xpc_disconnect_channel(const int line, struct xpc_channel *ch, | 1530 | xpc_disconnect_channel(const int line, struct xpc_channel *ch, |
1531 | enum xpc_retval reason, unsigned long *irq_flags) | 1531 | enum xp_retval reason, unsigned long *irq_flags) |
1532 | { | 1532 | { |
1533 | u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED); | 1533 | u32 channel_was_connected = (ch->flags & XPC_C_CONNECTED); |
1534 | 1534 | ||
@@ -1563,7 +1563,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch, | |||
1563 | 1563 | ||
1564 | } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && | 1564 | } else if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) && |
1565 | !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { | 1565 | !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) { |
1566 | /* start a kthread that will do the xpcDisconnecting callout */ | 1566 | /* start a kthread that will do the xpDisconnecting callout */ |
1567 | xpc_create_kthreads(ch, 1, 1); | 1567 | xpc_create_kthreads(ch, 1, 1); |
1568 | } | 1568 | } |
1569 | 1569 | ||
@@ -1575,7 +1575,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch, | |||
1575 | } | 1575 | } |
1576 | 1576 | ||
1577 | void | 1577 | void |
1578 | xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason) | 1578 | xpc_disconnect_callout(struct xpc_channel *ch, enum xp_retval reason) |
1579 | { | 1579 | { |
1580 | /* | 1580 | /* |
1581 | * Let the channel's registerer know that the channel is being | 1581 | * Let the channel's registerer know that the channel is being |
@@ -1598,13 +1598,13 @@ xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason) | |||
1598 | * Wait for a message entry to become available for the specified channel, | 1598 | * Wait for a message entry to become available for the specified channel, |
1599 | * but don't wait any longer than 1 jiffy. | 1599 | * but don't wait any longer than 1 jiffy. |
1600 | */ | 1600 | */ |
1601 | static enum xpc_retval | 1601 | static enum xp_retval |
1602 | xpc_allocate_msg_wait(struct xpc_channel *ch) | 1602 | xpc_allocate_msg_wait(struct xpc_channel *ch) |
1603 | { | 1603 | { |
1604 | enum xpc_retval ret; | 1604 | enum xp_retval ret; |
1605 | 1605 | ||
1606 | if (ch->flags & XPC_C_DISCONNECTING) { | 1606 | if (ch->flags & XPC_C_DISCONNECTING) { |
1607 | DBUG_ON(ch->reason == xpcInterrupted); | 1607 | DBUG_ON(ch->reason == xpInterrupted); |
1608 | return ch->reason; | 1608 | return ch->reason; |
1609 | } | 1609 | } |
1610 | 1610 | ||
@@ -1614,11 +1614,11 @@ xpc_allocate_msg_wait(struct xpc_channel *ch) | |||
1614 | 1614 | ||
1615 | if (ch->flags & XPC_C_DISCONNECTING) { | 1615 | if (ch->flags & XPC_C_DISCONNECTING) { |
1616 | ret = ch->reason; | 1616 | ret = ch->reason; |
1617 | DBUG_ON(ch->reason == xpcInterrupted); | 1617 | DBUG_ON(ch->reason == xpInterrupted); |
1618 | } else if (ret == 0) { | 1618 | } else if (ret == 0) { |
1619 | ret = xpcTimeout; | 1619 | ret = xpTimeout; |
1620 | } else { | 1620 | } else { |
1621 | ret = xpcInterrupted; | 1621 | ret = xpInterrupted; |
1622 | } | 1622 | } |
1623 | 1623 | ||
1624 | return ret; | 1624 | return ret; |
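xpc_allocate_msg_wait() maps the outcome of its bounded, interruptible wait onto the renamed codes: a disconnect in progress wins and returns the channel's reason, a zero wait result becomes xpTimeout, and anything else becomes xpInterrupted. A standalone sketch of that mapping, with a simplified wait result standing in for the kernel's sleep primitive:

#include <stdio.h>

/* Illustrative subset of enum xp_retval. */
enum xp_retval { xpSuccess = 0, xpTimeout, xpInterrupted, xpNoHeartbeat };

/*
 * Map the result of a 1-jiffy, interruptible wait onto the xp-prefixed
 * codes in the same way as xpc_allocate_msg_wait(): a disconnect in
 * progress takes priority, a zero result means the timeout expired,
 * anything else means the sleep was interrupted by a signal.
 */
static enum xp_retval
map_wait_result(int disconnecting, enum xp_retval ch_reason, long wait_ret)
{
	if (disconnecting)
		return ch_reason;	/* channel's disconnect reason */
	if (wait_ret == 0)
		return xpTimeout;	/* the 1-jiffy timeout expired */
	return xpInterrupted;		/* woken early by a signal */
}

int main(void)
{
	printf("%d\n", map_wait_result(0, xpSuccess, 0));      /* xpTimeout */
	printf("%d\n", map_wait_result(0, xpSuccess, -1));     /* xpInterrupted */
	printf("%d\n", map_wait_result(1, xpNoHeartbeat, 0));  /* xpNoHeartbeat */
	return 0;
}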
@@ -1628,12 +1628,12 @@ xpc_allocate_msg_wait(struct xpc_channel *ch) | |||
1628 | * Allocate an entry for a message from the message queue associated with the | 1628 | * Allocate an entry for a message from the message queue associated with the |
1629 | * specified channel. | 1629 | * specified channel. |
1630 | */ | 1630 | */ |
1631 | static enum xpc_retval | 1631 | static enum xp_retval |
1632 | xpc_allocate_msg(struct xpc_channel *ch, u32 flags, | 1632 | xpc_allocate_msg(struct xpc_channel *ch, u32 flags, |
1633 | struct xpc_msg **address_of_msg) | 1633 | struct xpc_msg **address_of_msg) |
1634 | { | 1634 | { |
1635 | struct xpc_msg *msg; | 1635 | struct xpc_msg *msg; |
1636 | enum xpc_retval ret; | 1636 | enum xp_retval ret; |
1637 | s64 put; | 1637 | s64 put; |
1638 | 1638 | ||
1639 | /* this reference will be dropped in xpc_send_msg() */ | 1639 | /* this reference will be dropped in xpc_send_msg() */ |
@@ -1645,7 +1645,7 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags, | |||
1645 | } | 1645 | } |
1646 | if (!(ch->flags & XPC_C_CONNECTED)) { | 1646 | if (!(ch->flags & XPC_C_CONNECTED)) { |
1647 | xpc_msgqueue_deref(ch); | 1647 | xpc_msgqueue_deref(ch); |
1648 | return xpcNotConnected; | 1648 | return xpNotConnected; |
1649 | } | 1649 | } |
1650 | 1650 | ||
1651 | /* | 1651 | /* |
@@ -1653,7 +1653,7 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags, | |||
1653 | * If none are available, we'll make sure that we grab the latest | 1653 | * If none are available, we'll make sure that we grab the latest |
1654 | * GP values. | 1654 | * GP values. |
1655 | */ | 1655 | */ |
1656 | ret = xpcTimeout; | 1656 | ret = xpTimeout; |
1657 | 1657 | ||
1658 | while (1) { | 1658 | while (1) { |
1659 | 1659 | ||
@@ -1683,16 +1683,16 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags, | |||
1683 | * that will cause the IPI handler to fetch the latest | 1683 | * that will cause the IPI handler to fetch the latest |
1684 | * GP values as if an IPI was sent by the other side. | 1684 | * GP values as if an IPI was sent by the other side. |
1685 | */ | 1685 | */ |
1686 | if (ret == xpcTimeout) | 1686 | if (ret == xpTimeout) |
1687 | xpc_IPI_send_local_msgrequest(ch); | 1687 | xpc_IPI_send_local_msgrequest(ch); |
1688 | 1688 | ||
1689 | if (flags & XPC_NOWAIT) { | 1689 | if (flags & XPC_NOWAIT) { |
1690 | xpc_msgqueue_deref(ch); | 1690 | xpc_msgqueue_deref(ch); |
1691 | return xpcNoWait; | 1691 | return xpNoWait; |
1692 | } | 1692 | } |
1693 | 1693 | ||
1694 | ret = xpc_allocate_msg_wait(ch); | 1694 | ret = xpc_allocate_msg_wait(ch); |
1695 | if (ret != xpcInterrupted && ret != xpcTimeout) { | 1695 | if (ret != xpInterrupted && ret != xpTimeout) { |
1696 | xpc_msgqueue_deref(ch); | 1696 | xpc_msgqueue_deref(ch); |
1697 | return ret; | 1697 | return ret; |
1698 | } | 1698 | } |
@@ -1711,7 +1711,7 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags, | |||
1711 | 1711 | ||
1712 | *address_of_msg = msg; | 1712 | *address_of_msg = msg; |
1713 | 1713 | ||
1714 | return xpcSuccess; | 1714 | return xpSuccess; |
1715 | } | 1715 | } |
1716 | 1716 | ||
1717 | /* | 1717 | /* |
@@ -1727,11 +1727,11 @@ xpc_allocate_msg(struct xpc_channel *ch, u32 flags, | |||
1727 | * payload - address of the allocated payload area pointer (filled in on | 1727 | * payload - address of the allocated payload area pointer (filled in on |
1728 | * return) in which the user-defined message is constructed. | 1728 | * return) in which the user-defined message is constructed. |
1729 | */ | 1729 | */ |
1730 | enum xpc_retval | 1730 | enum xp_retval |
1731 | xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload) | 1731 | xpc_initiate_allocate(partid_t partid, int ch_number, u32 flags, void **payload) |
1732 | { | 1732 | { |
1733 | struct xpc_partition *part = &xpc_partitions[partid]; | 1733 | struct xpc_partition *part = &xpc_partitions[partid]; |
1734 | enum xpc_retval ret = xpcUnknownReason; | 1734 | enum xp_retval ret = xpUnknownReason; |
1735 | struct xpc_msg *msg = NULL; | 1735 | struct xpc_msg *msg = NULL; |
1736 | 1736 | ||
1737 | DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); | 1737 | DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS); |
@@ -1814,11 +1814,11 @@ xpc_send_msgs(struct xpc_channel *ch, s64 initial_put) | |||
1814 | * local message queue's Put value and sends an IPI to the partition the | 1814 | * local message queue's Put value and sends an IPI to the partition the |
1815 | * message is being sent to. | 1815 | * message is being sent to. |
1816 | */ | 1816 | */ |
1817 | static enum xpc_retval | 1817 | static enum xp_retval |
1818 | xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, | 1818 | xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, |
1819 | xpc_notify_func func, void *key) | 1819 | xpc_notify_func func, void *key) |
1820 | { | 1820 | { |
1821 | enum xpc_retval ret = xpcSuccess; | 1821 | enum xp_retval ret = xpSuccess; |
1822 | struct xpc_notify *notify = notify; | 1822 | struct xpc_notify *notify = notify; |
1823 | s64 put, msg_number = msg->number; | 1823 | s64 put, msg_number = msg->number; |
1824 | 1824 | ||
@@ -1908,12 +1908,12 @@ xpc_send_msg(struct xpc_channel *ch, struct xpc_msg *msg, u8 notify_type, | |||
1908 | * payload - pointer to the payload area allocated via | 1908 | * payload - pointer to the payload area allocated via |
1909 | * xpc_initiate_allocate(). | 1909 | * xpc_initiate_allocate(). |
1910 | */ | 1910 | */ |
1911 | enum xpc_retval | 1911 | enum xp_retval |
1912 | xpc_initiate_send(partid_t partid, int ch_number, void *payload) | 1912 | xpc_initiate_send(partid_t partid, int ch_number, void *payload) |
1913 | { | 1913 | { |
1914 | struct xpc_partition *part = &xpc_partitions[partid]; | 1914 | struct xpc_partition *part = &xpc_partitions[partid]; |
1915 | struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); | 1915 | struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); |
1916 | enum xpc_retval ret; | 1916 | enum xp_retval ret; |
1917 | 1917 | ||
1918 | dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg, | 1918 | dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg, |
1919 | partid, ch_number); | 1919 | partid, ch_number); |
@@ -1957,13 +1957,13 @@ xpc_initiate_send(partid_t partid, int ch_number, void *payload) | |||
1957 | * receipt. THIS FUNCTION MUST BE NON-BLOCKING. | 1957 | * receipt. THIS FUNCTION MUST BE NON-BLOCKING. |
1958 | * key - user-defined key to be passed to the function when it's called. | 1958 | * key - user-defined key to be passed to the function when it's called. |
1959 | */ | 1959 | */ |
1960 | enum xpc_retval | 1960 | enum xp_retval |
1961 | xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload, | 1961 | xpc_initiate_send_notify(partid_t partid, int ch_number, void *payload, |
1962 | xpc_notify_func func, void *key) | 1962 | xpc_notify_func func, void *key) |
1963 | { | 1963 | { |
1964 | struct xpc_partition *part = &xpc_partitions[partid]; | 1964 | struct xpc_partition *part = &xpc_partitions[partid]; |
1965 | struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); | 1965 | struct xpc_msg *msg = XPC_MSG_ADDRESS(payload); |
1966 | enum xpc_retval ret; | 1966 | enum xp_retval ret; |
1967 | 1967 | ||
1968 | dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg, | 1968 | dev_dbg(xpc_chan, "msg=0x%p, partid=%d, channel=%d\n", (void *)msg, |
1969 | partid, ch_number); | 1969 | partid, ch_number); |
@@ -1985,7 +1985,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) | |||
1985 | struct xpc_msg *remote_msg, *msg; | 1985 | struct xpc_msg *remote_msg, *msg; |
1986 | u32 msg_index, nmsgs; | 1986 | u32 msg_index, nmsgs; |
1987 | u64 msg_offset; | 1987 | u64 msg_offset; |
1988 | enum xpc_retval ret; | 1988 | enum xp_retval ret; |
1989 | 1989 | ||
1990 | if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) { | 1990 | if (mutex_lock_interruptible(&ch->msg_to_pull_mutex) != 0) { |
1991 | /* we were interrupted by a signal */ | 1991 | /* we were interrupted by a signal */ |
@@ -2012,7 +2012,7 @@ xpc_pull_remote_msg(struct xpc_channel *ch, s64 get) | |||
2012 | 2012 | ||
2013 | ret = xpc_pull_remote_cachelines(part, msg, remote_msg, | 2013 | ret = xpc_pull_remote_cachelines(part, msg, remote_msg, |
2014 | nmsgs * ch->msg_size); | 2014 | nmsgs * ch->msg_size); |
2015 | if (ret != xpcSuccess) { | 2015 | if (ret != xpSuccess) { |
2016 | 2016 | ||
2017 | dev_dbg(xpc_chan, "failed to pull %d msgs starting with" | 2017 | dev_dbg(xpc_chan, "failed to pull %d msgs starting with" |
2018 | " msg %ld from partition %d, channel=%d, " | 2018 | " msg %ld from partition %d, channel=%d, " |
@@ -2112,7 +2112,7 @@ xpc_deliver_msg(struct xpc_channel *ch) | |||
2112 | ch->number); | 2112 | ch->number); |
2113 | 2113 | ||
2114 | /* deliver the message to its intended recipient */ | 2114 | /* deliver the message to its intended recipient */ |
2115 | ch->func(xpcMsgReceived, ch->partid, ch->number, | 2115 | ch->func(xpMsgReceived, ch->partid, ch->number, |
2116 | &msg->payload, ch->key); | 2116 | &msg->payload, ch->key); |
2117 | 2117 | ||
2118 | dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, " | 2118 | dev_dbg(xpc_chan, "ch->func() returned, msg=0x%p, " |
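With the renaming applied throughout xpc_channel.c, the two message-side callouts keep their distinct meanings: the channel's func is invoked with xpMsgReceived when a message is delivered to the local recipient, while a sender's notify function is invoked with xpMsgDelivered once the other side has received it. A hedged sketch of a sender-side notify callback; the signature and names below are stand-ins for illustration, not the driver's xpc_notify_func type.

#include <stdio.h>

/* Illustrative subset of the renamed return codes. */
enum xp_retval { xpSuccess = 0, xpMsgReceived, xpMsgDelivered, xpTimeout };

/*
 * Hypothetical sender-side notify callback: invoked with xpMsgDelivered
 * once the remote partition has received the message, or with an error
 * reason if delivery did not complete.
 */
static void my_send_done(enum xp_retval reason, int partid, int channel, void *key)
{
	if (reason == xpMsgDelivered)
		printf("msg on partid %d channel %d acknowledged\n", partid, channel);
	else
		printf("send failed, reason=%d\n", reason);
	(void)key;
}

int main(void)
{
	my_send_done(xpMsgDelivered, 1, 0, NULL);
	my_send_done(xpTimeout, 1, 0, NULL);
	return 0;
}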
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index f673ba90eb0e..2765b423ff33 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c | |||
@@ -315,13 +315,13 @@ xpc_initiate_discovery(void *ignore) | |||
315 | * the XPC per partition variables from the remote partition and waiting for | 315 | * the XPC per partition variables from the remote partition and waiting for |
316 | * the remote partition to pull ours. | 316 | * the remote partition to pull ours. |
317 | */ | 317 | */ |
318 | static enum xpc_retval | 318 | static enum xp_retval |
319 | xpc_make_first_contact(struct xpc_partition *part) | 319 | xpc_make_first_contact(struct xpc_partition *part) |
320 | { | 320 | { |
321 | enum xpc_retval ret; | 321 | enum xp_retval ret; |
322 | 322 | ||
323 | while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) { | 323 | while ((ret = xpc_pull_remote_vars_part(part)) != xpSuccess) { |
324 | if (ret != xpcRetry) { | 324 | if (ret != xpRetry) { |
325 | XPC_DEACTIVATE_PARTITION(part, ret); | 325 | XPC_DEACTIVATE_PARTITION(part, ret); |
326 | return ret; | 326 | return ret; |
327 | } | 327 | } |
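The loop above keeps pulling the remote per-partition variables only while the failure is the retryable xpRetry; any other code deactivates the partition and propagates the reason. A small sketch of that retry-on-xpRetry pattern, where pull_vars() is a hypothetical stand-in for xpc_pull_remote_vars_part() and the sleep/exit checks of the real loop are omitted:

#include <stdio.h>

/* Illustrative subset of the renamed codes. */
enum xp_retval { xpSuccess = 0, xpRetry, xpBadVersion };

static int attempts;

/* Stand-in for xpc_pull_remote_vars_part(): fails twice, then succeeds. */
static enum xp_retval pull_vars(void)
{
	return (++attempts < 3) ? xpRetry : xpSuccess;
}

/* Retry only on xpRetry; any other failure is returned to the caller. */
static enum xp_retval make_first_contact(void)
{
	enum xp_retval ret;

	while ((ret = pull_vars()) != xpSuccess) {
		if (ret != xpRetry)
			return ret;	/* would deactivate the partition */
	}
	return xpSuccess;
}

int main(void)
{
	enum xp_retval ret = make_first_contact();

	printf("first contact: %d after %d attempts\n", ret, attempts);
	return 0;
}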
@@ -406,7 +406,7 @@ xpc_partition_up(struct xpc_partition *part) | |||
406 | 406 | ||
407 | dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part)); | 407 | dev_dbg(xpc_chan, "activating partition %d\n", XPC_PARTID(part)); |
408 | 408 | ||
409 | if (xpc_setup_infrastructure(part) != xpcSuccess) | 409 | if (xpc_setup_infrastructure(part) != xpSuccess) |
410 | return; | 410 | return; |
411 | 411 | ||
412 | /* | 412 | /* |
@@ -418,7 +418,7 @@ xpc_partition_up(struct xpc_partition *part) | |||
418 | 418 | ||
419 | (void)xpc_part_ref(part); /* this will always succeed */ | 419 | (void)xpc_part_ref(part); /* this will always succeed */ |
420 | 420 | ||
421 | if (xpc_make_first_contact(part) == xpcSuccess) | 421 | if (xpc_make_first_contact(part) == xpSuccess) |
422 | xpc_channel_mgr(part); | 422 | xpc_channel_mgr(part); |
423 | 423 | ||
424 | xpc_part_deref(part); | 424 | xpc_part_deref(part); |
@@ -470,7 +470,7 @@ xpc_activating(void *__partid) | |||
470 | 470 | ||
471 | spin_lock_irqsave(&part->act_lock, irq_flags); | 471 | spin_lock_irqsave(&part->act_lock, irq_flags); |
472 | part->act_state = XPC_P_INACTIVE; | 472 | part->act_state = XPC_P_INACTIVE; |
473 | XPC_SET_REASON(part, xpcPhysAddrRegFailed, __LINE__); | 473 | XPC_SET_REASON(part, xpPhysAddrRegFailed, __LINE__); |
474 | spin_unlock_irqrestore(&part->act_lock, irq_flags); | 474 | spin_unlock_irqrestore(&part->act_lock, irq_flags); |
475 | part->remote_rp_pa = 0; | 475 | part->remote_rp_pa = 0; |
476 | return 0; | 476 | return 0; |
@@ -488,7 +488,7 @@ xpc_activating(void *__partid) | |||
488 | xpc_disallow_hb(partid, xpc_vars); | 488 | xpc_disallow_hb(partid, xpc_vars); |
489 | xpc_mark_partition_inactive(part); | 489 | xpc_mark_partition_inactive(part); |
490 | 490 | ||
491 | if (part->reason == xpcReactivating) { | 491 | if (part->reason == xpReactivating) { |
492 | /* interrupting ourselves results in activating partition */ | 492 | /* interrupting ourselves results in activating partition */ |
493 | xpc_IPI_send_reactivate(part); | 493 | xpc_IPI_send_reactivate(part); |
494 | } | 494 | } |
@@ -508,7 +508,7 @@ xpc_activate_partition(struct xpc_partition *part) | |||
508 | DBUG_ON(part->act_state != XPC_P_INACTIVE); | 508 | DBUG_ON(part->act_state != XPC_P_INACTIVE); |
509 | 509 | ||
510 | part->act_state = XPC_P_ACTIVATION_REQ; | 510 | part->act_state = XPC_P_ACTIVATION_REQ; |
511 | XPC_SET_REASON(part, xpcCloneKThread, __LINE__); | 511 | XPC_SET_REASON(part, xpCloneKThread, __LINE__); |
512 | 512 | ||
513 | spin_unlock_irqrestore(&part->act_lock, irq_flags); | 513 | spin_unlock_irqrestore(&part->act_lock, irq_flags); |
514 | 514 | ||
@@ -517,7 +517,7 @@ xpc_activate_partition(struct xpc_partition *part) | |||
517 | if (IS_ERR(kthread)) { | 517 | if (IS_ERR(kthread)) { |
518 | spin_lock_irqsave(&part->act_lock, irq_flags); | 518 | spin_lock_irqsave(&part->act_lock, irq_flags); |
519 | part->act_state = XPC_P_INACTIVE; | 519 | part->act_state = XPC_P_INACTIVE; |
520 | XPC_SET_REASON(part, xpcCloneKThreadFailed, __LINE__); | 520 | XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__); |
521 | spin_unlock_irqrestore(&part->act_lock, irq_flags); | 521 | spin_unlock_irqrestore(&part->act_lock, irq_flags); |
522 | } | 522 | } |
523 | } | 523 | } |
@@ -696,7 +696,7 @@ xpc_kthread_start(void *args) | |||
696 | ch->flags |= XPC_C_DISCONNECTINGCALLOUT; | 696 | ch->flags |= XPC_C_DISCONNECTINGCALLOUT; |
697 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 697 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
698 | 698 | ||
699 | xpc_disconnect_callout(ch, xpcDisconnecting); | 699 | xpc_disconnect_callout(ch, xpDisconnecting); |
700 | 700 | ||
701 | spin_lock_irqsave(&ch->lock, irq_flags); | 701 | spin_lock_irqsave(&ch->lock, irq_flags); |
702 | ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE; | 702 | ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE; |
@@ -776,7 +776,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed, | |||
776 | * then we'll deadlock if all other kthreads assigned | 776 | * then we'll deadlock if all other kthreads assigned |
777 | * to this channel are blocked in the channel's | 777 | * to this channel are blocked in the channel's |
778 | * registerer, because the only thing that will unblock | 778 | * registerer, because the only thing that will unblock |
779 | * them is the xpcDisconnecting callout that this | 779 | * them is the xpDisconnecting callout that this |
780 | * failed kthread_run() would have made. | 780 | * failed kthread_run() would have made. |
781 | */ | 781 | */ |
782 | 782 | ||
@@ -796,7 +796,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed, | |||
796 | * to function. | 796 | * to function. |
797 | */ | 797 | */ |
798 | spin_lock_irqsave(&ch->lock, irq_flags); | 798 | spin_lock_irqsave(&ch->lock, irq_flags); |
799 | XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources, | 799 | XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources, |
800 | &irq_flags); | 800 | &irq_flags); |
801 | spin_unlock_irqrestore(&ch->lock, irq_flags); | 801 | spin_unlock_irqrestore(&ch->lock, irq_flags); |
802 | } | 802 | } |
@@ -857,7 +857,7 @@ xpc_disconnect_wait(int ch_number) | |||
857 | } | 857 | } |
858 | 858 | ||
859 | static void | 859 | static void |
860 | xpc_do_exit(enum xpc_retval reason) | 860 | xpc_do_exit(enum xp_retval reason) |
861 | { | 861 | { |
862 | partid_t partid; | 862 | partid_t partid; |
863 | int active_part_count, printed_waiting_msg = 0; | 863 | int active_part_count, printed_waiting_msg = 0; |
@@ -955,7 +955,7 @@ xpc_do_exit(enum xpc_retval reason) | |||
955 | del_timer_sync(&xpc_hb_timer); | 955 | del_timer_sync(&xpc_hb_timer); |
956 | DBUG_ON(xpc_vars->heartbeating_to_mask != 0); | 956 | DBUG_ON(xpc_vars->heartbeating_to_mask != 0); |
957 | 957 | ||
958 | if (reason == xpcUnloading) { | 958 | if (reason == xpUnloading) { |
959 | /* take ourselves off of the reboot_notifier_list */ | 959 | /* take ourselves off of the reboot_notifier_list */ |
960 | (void)unregister_reboot_notifier(&xpc_reboot_notifier); | 960 | (void)unregister_reboot_notifier(&xpc_reboot_notifier); |
961 | 961 | ||
@@ -981,20 +981,20 @@ xpc_do_exit(enum xpc_retval reason) | |||
981 | static int | 981 | static int |
982 | xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused) | 982 | xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused) |
983 | { | 983 | { |
984 | enum xpc_retval reason; | 984 | enum xp_retval reason; |
985 | 985 | ||
986 | switch (event) { | 986 | switch (event) { |
987 | case SYS_RESTART: | 987 | case SYS_RESTART: |
988 | reason = xpcSystemReboot; | 988 | reason = xpSystemReboot; |
989 | break; | 989 | break; |
990 | case SYS_HALT: | 990 | case SYS_HALT: |
991 | reason = xpcSystemHalt; | 991 | reason = xpSystemHalt; |
992 | break; | 992 | break; |
993 | case SYS_POWER_OFF: | 993 | case SYS_POWER_OFF: |
994 | reason = xpcSystemPoweroff; | 994 | reason = xpSystemPoweroff; |
995 | break; | 995 | break; |
996 | default: | 996 | default: |
997 | reason = xpcSystemGoingDown; | 997 | reason = xpSystemGoingDown; |
998 | } | 998 | } |
999 | 999 | ||
1000 | xpc_do_exit(reason); | 1000 | xpc_do_exit(reason); |
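The notifier above translates the reboot event into one of the renamed shutdown reasons before calling xpc_do_exit(). A standalone sketch of that mapping; the SYS_* constants here are local stand-ins for the kernel's reboot notifier events, and the enum values are illustrative only.

#include <stdio.h>

/* Local stand-ins for the kernel's reboot notifier events. */
enum { SYS_RESTART = 1, SYS_HALT, SYS_POWER_OFF };

/* Illustrative subset of the renamed shutdown reasons. */
enum xp_retval {
	xpSystemReboot = 40,
	xpSystemHalt,
	xpSystemPoweroff,
	xpSystemGoingDown
};

/* Map a reboot event onto the reason handed to xpc_do_exit(). */
static enum xp_retval reboot_reason(unsigned long event)
{
	switch (event) {
	case SYS_RESTART:
		return xpSystemReboot;
	case SYS_HALT:
		return xpSystemHalt;
	case SYS_POWER_OFF:
		return xpSystemPoweroff;
	default:
		return xpSystemGoingDown;
	}
}

int main(void)
{
	printf("%d\n", reboot_reason(SYS_HALT));	/* xpSystemHalt */
	printf("%d\n", reboot_reason(999));		/* xpSystemGoingDown */
	return 0;
}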
@@ -1279,7 +1279,7 @@ xpc_init(void) | |||
1279 | /* mark this new thread as a non-starter */ | 1279 | /* mark this new thread as a non-starter */ |
1280 | complete(&xpc_discovery_exited); | 1280 | complete(&xpc_discovery_exited); |
1281 | 1281 | ||
1282 | xpc_do_exit(xpcUnloading); | 1282 | xpc_do_exit(xpUnloading); |
1283 | return -EBUSY; | 1283 | return -EBUSY; |
1284 | } | 1284 | } |
1285 | 1285 | ||
@@ -1297,7 +1297,7 @@ module_init(xpc_init); | |||
1297 | void __exit | 1297 | void __exit |
1298 | xpc_exit(void) | 1298 | xpc_exit(void) |
1299 | { | 1299 | { |
1300 | xpc_do_exit(xpcUnloading); | 1300 | xpc_do_exit(xpUnloading); |
1301 | } | 1301 | } |
1302 | 1302 | ||
1303 | module_exit(xpc_exit); | 1303 | module_exit(xpc_exit); |
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c index acd3fd4285d7..d9b462ea29d7 100644 --- a/drivers/misc/sgi-xp/xpc_partition.c +++ b/drivers/misc/sgi-xp/xpc_partition.c | |||
@@ -444,7 +444,7 @@ xpc_check_remote_hb(void) | |||
444 | (remote_vars->heartbeat_offline == 0)) || | 444 | (remote_vars->heartbeat_offline == 0)) || |
445 | !xpc_hb_allowed(sn_partition_id, remote_vars)) { | 445 | !xpc_hb_allowed(sn_partition_id, remote_vars)) { |
446 | 446 | ||
447 | XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat); | 447 | XPC_DEACTIVATE_PARTITION(part, xpNoHeartbeat); |
448 | continue; | 448 | continue; |
449 | } | 449 | } |
450 | 450 | ||
@@ -459,7 +459,7 @@ xpc_check_remote_hb(void) | |||
459 | * is large enough to contain a copy of their reserved page header and | 459 | * is large enough to contain a copy of their reserved page header and |
460 | * part_nasids mask. | 460 | * part_nasids mask. |
461 | */ | 461 | */ |
462 | static enum xpc_retval | 462 | static enum xp_retval |
463 | xpc_get_remote_rp(int nasid, u64 *discovered_nasids, | 463 | xpc_get_remote_rp(int nasid, u64 *discovered_nasids, |
464 | struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa) | 464 | struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa) |
465 | { | 465 | { |
@@ -469,7 +469,7 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids, | |||
469 | 469 | ||
470 | *remote_rp_pa = xpc_get_rsvd_page_pa(nasid); | 470 | *remote_rp_pa = xpc_get_rsvd_page_pa(nasid); |
471 | if (*remote_rp_pa == 0) | 471 | if (*remote_rp_pa == 0) |
472 | return xpcNoRsvdPageAddr; | 472 | return xpNoRsvdPageAddr; |
473 | 473 | ||
474 | /* pull over the reserved page header and part_nasids mask */ | 474 | /* pull over the reserved page header and part_nasids mask */ |
475 | bres = xp_bte_copy(*remote_rp_pa, (u64)remote_rp, | 475 | bres = xp_bte_copy(*remote_rp_pa, (u64)remote_rp, |
@@ -489,18 +489,18 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids, | |||
489 | 489 | ||
490 | if (remote_rp->partid < 1 || | 490 | if (remote_rp->partid < 1 || |
491 | remote_rp->partid > (XP_MAX_PARTITIONS - 1)) { | 491 | remote_rp->partid > (XP_MAX_PARTITIONS - 1)) { |
492 | return xpcInvalidPartid; | 492 | return xpInvalidPartid; |
493 | } | 493 | } |
494 | 494 | ||
495 | if (remote_rp->partid == sn_partition_id) | 495 | if (remote_rp->partid == sn_partition_id) |
496 | return xpcLocalPartid; | 496 | return xpLocalPartid; |
497 | 497 | ||
498 | if (XPC_VERSION_MAJOR(remote_rp->version) != | 498 | if (XPC_VERSION_MAJOR(remote_rp->version) != |
499 | XPC_VERSION_MAJOR(XPC_RP_VERSION)) { | 499 | XPC_VERSION_MAJOR(XPC_RP_VERSION)) { |
500 | return xpcBadVersion; | 500 | return xpBadVersion; |
501 | } | 501 | } |
502 | 502 | ||
503 | return xpcSuccess; | 503 | return xpSuccess; |
504 | } | 504 | } |
505 | 505 | ||
506 | /* | 506 | /* |
@@ -509,13 +509,13 @@ xpc_get_remote_rp(int nasid, u64 *discovered_nasids, | |||
509 | * remote_vars points to a buffer that is cacheline aligned for BTE copies and | 509 | * remote_vars points to a buffer that is cacheline aligned for BTE copies and |
510 | * assumed to be of size XPC_RP_VARS_SIZE. | 510 | * assumed to be of size XPC_RP_VARS_SIZE. |
511 | */ | 511 | */ |
512 | static enum xpc_retval | 512 | static enum xp_retval |
513 | xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars) | 513 | xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars) |
514 | { | 514 | { |
515 | int bres; | 515 | int bres; |
516 | 516 | ||
517 | if (remote_vars_pa == 0) | 517 | if (remote_vars_pa == 0) |
518 | return xpcVarsNotSet; | 518 | return xpVarsNotSet; |
519 | 519 | ||
520 | /* pull over the cross partition variables */ | 520 | /* pull over the cross partition variables */ |
521 | bres = xp_bte_copy(remote_vars_pa, (u64)remote_vars, XPC_RP_VARS_SIZE, | 521 | bres = xp_bte_copy(remote_vars_pa, (u64)remote_vars, XPC_RP_VARS_SIZE, |
@@ -525,10 +525,10 @@ xpc_get_remote_vars(u64 remote_vars_pa, struct xpc_vars *remote_vars) | |||
525 | 525 | ||
526 | if (XPC_VERSION_MAJOR(remote_vars->version) != | 526 | if (XPC_VERSION_MAJOR(remote_vars->version) != |
527 | XPC_VERSION_MAJOR(XPC_V_VERSION)) { | 527 | XPC_VERSION_MAJOR(XPC_V_VERSION)) { |
528 | return xpcBadVersion; | 528 | return xpBadVersion; |
529 | } | 529 | } |
530 | 530 | ||
531 | return xpcSuccess; | 531 | return xpSuccess; |
532 | } | 532 | } |
533 | 533 | ||
534 | /* | 534 | /* |
@@ -606,14 +606,14 @@ xpc_identify_act_IRQ_req(int nasid) | |||
606 | struct timespec remote_rp_stamp = { 0, 0 }; | 606 | struct timespec remote_rp_stamp = { 0, 0 }; |
607 | partid_t partid; | 607 | partid_t partid; |
608 | struct xpc_partition *part; | 608 | struct xpc_partition *part; |
609 | enum xpc_retval ret; | 609 | enum xp_retval ret; |
610 | 610 | ||
611 | /* pull over the reserved page structure */ | 611 | /* pull over the reserved page structure */ |
612 | 612 | ||
613 | remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer; | 613 | remote_rp = (struct xpc_rsvd_page *)xpc_remote_copy_buffer; |
614 | 614 | ||
615 | ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa); | 615 | ret = xpc_get_remote_rp(nasid, NULL, remote_rp, &remote_rp_pa); |
616 | if (ret != xpcSuccess) { | 616 | if (ret != xpSuccess) { |
617 | dev_warn(xpc_part, "unable to get reserved page from nasid %d, " | 617 | dev_warn(xpc_part, "unable to get reserved page from nasid %d, " |
618 | "which sent interrupt, reason=%d\n", nasid, ret); | 618 | "which sent interrupt, reason=%d\n", nasid, ret); |
619 | return; | 619 | return; |
@@ -632,7 +632,7 @@ xpc_identify_act_IRQ_req(int nasid) | |||
632 | remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer; | 632 | remote_vars = (struct xpc_vars *)xpc_remote_copy_buffer; |
633 | 633 | ||
634 | ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); | 634 | ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); |
635 | if (ret != xpcSuccess) { | 635 | if (ret != xpSuccess) { |
636 | 636 | ||
637 | dev_warn(xpc_part, "unable to get XPC variables from nasid %d, " | 637 | dev_warn(xpc_part, "unable to get XPC variables from nasid %d, " |
638 | "which sent interrupt, reason=%d\n", nasid, ret); | 638 | "which sent interrupt, reason=%d\n", nasid, ret); |
@@ -699,7 +699,7 @@ xpc_identify_act_IRQ_req(int nasid) | |||
699 | &remote_rp_stamp, remote_rp_pa, | 699 | &remote_rp_stamp, remote_rp_pa, |
700 | remote_vars_pa, remote_vars); | 700 | remote_vars_pa, remote_vars); |
701 | part->reactivate_nasid = nasid; | 701 | part->reactivate_nasid = nasid; |
702 | XPC_DEACTIVATE_PARTITION(part, xpcReactivating); | 702 | XPC_DEACTIVATE_PARTITION(part, xpReactivating); |
703 | return; | 703 | return; |
704 | } | 704 | } |
705 | 705 | ||
@@ -754,11 +754,11 @@ xpc_identify_act_IRQ_req(int nasid) | |||
754 | 754 | ||
755 | if (reactivate) { | 755 | if (reactivate) { |
756 | part->reactivate_nasid = nasid; | 756 | part->reactivate_nasid = nasid; |
757 | XPC_DEACTIVATE_PARTITION(part, xpcReactivating); | 757 | XPC_DEACTIVATE_PARTITION(part, xpReactivating); |
758 | 758 | ||
759 | } else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) && | 759 | } else if (XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version) && |
760 | xpc_partition_disengage_requested(1UL << partid)) { | 760 | xpc_partition_disengage_requested(1UL << partid)) { |
761 | XPC_DEACTIVATE_PARTITION(part, xpcOtherGoingDown); | 761 | XPC_DEACTIVATE_PARTITION(part, xpOtherGoingDown); |
762 | } | 762 | } |
763 | } | 763 | } |
764 | 764 | ||
@@ -870,20 +870,20 @@ xpc_partition_disengaged(struct xpc_partition *part) | |||
870 | /* | 870 | /* |
871 | * Mark specified partition as active. | 871 | * Mark specified partition as active. |
872 | */ | 872 | */ |
873 | enum xpc_retval | 873 | enum xp_retval |
874 | xpc_mark_partition_active(struct xpc_partition *part) | 874 | xpc_mark_partition_active(struct xpc_partition *part) |
875 | { | 875 | { |
876 | unsigned long irq_flags; | 876 | unsigned long irq_flags; |
877 | enum xpc_retval ret; | 877 | enum xp_retval ret; |
878 | 878 | ||
879 | dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part)); | 879 | dev_dbg(xpc_part, "setting partition %d to ACTIVE\n", XPC_PARTID(part)); |
880 | 880 | ||
881 | spin_lock_irqsave(&part->act_lock, irq_flags); | 881 | spin_lock_irqsave(&part->act_lock, irq_flags); |
882 | if (part->act_state == XPC_P_ACTIVATING) { | 882 | if (part->act_state == XPC_P_ACTIVATING) { |
883 | part->act_state = XPC_P_ACTIVE; | 883 | part->act_state = XPC_P_ACTIVE; |
884 | ret = xpcSuccess; | 884 | ret = xpSuccess; |
885 | } else { | 885 | } else { |
886 | DBUG_ON(part->reason == xpcSuccess); | 886 | DBUG_ON(part->reason == xpSuccess); |
887 | ret = part->reason; | 887 | ret = part->reason; |
888 | } | 888 | } |
889 | spin_unlock_irqrestore(&part->act_lock, irq_flags); | 889 | spin_unlock_irqrestore(&part->act_lock, irq_flags); |
@@ -896,7 +896,7 @@ xpc_mark_partition_active(struct xpc_partition *part) | |||
896 | */ | 896 | */ |
897 | void | 897 | void |
898 | xpc_deactivate_partition(const int line, struct xpc_partition *part, | 898 | xpc_deactivate_partition(const int line, struct xpc_partition *part, |
899 | enum xpc_retval reason) | 899 | enum xp_retval reason) |
900 | { | 900 | { |
901 | unsigned long irq_flags; | 901 | unsigned long irq_flags; |
902 | 902 | ||
@@ -905,15 +905,15 @@ xpc_deactivate_partition(const int line, struct xpc_partition *part, | |||
905 | if (part->act_state == XPC_P_INACTIVE) { | 905 | if (part->act_state == XPC_P_INACTIVE) { |
906 | XPC_SET_REASON(part, reason, line); | 906 | XPC_SET_REASON(part, reason, line); |
907 | spin_unlock_irqrestore(&part->act_lock, irq_flags); | 907 | spin_unlock_irqrestore(&part->act_lock, irq_flags); |
908 | if (reason == xpcReactivating) { | 908 | if (reason == xpReactivating) { |
909 | /* we interrupt ourselves to reactivate partition */ | 909 | /* we interrupt ourselves to reactivate partition */ |
910 | xpc_IPI_send_reactivate(part); | 910 | xpc_IPI_send_reactivate(part); |
911 | } | 911 | } |
912 | return; | 912 | return; |
913 | } | 913 | } |
914 | if (part->act_state == XPC_P_DEACTIVATING) { | 914 | if (part->act_state == XPC_P_DEACTIVATING) { |
915 | if ((part->reason == xpcUnloading && reason != xpcUnloading) || | 915 | if ((part->reason == xpUnloading && reason != xpUnloading) || |
916 | reason == xpcReactivating) { | 916 | reason == xpReactivating) { |
917 | XPC_SET_REASON(part, reason, line); | 917 | XPC_SET_REASON(part, reason, line); |
918 | } | 918 | } |
919 | spin_unlock_irqrestore(&part->act_lock, irq_flags); | 919 | spin_unlock_irqrestore(&part->act_lock, irq_flags); |
@@ -985,7 +985,7 @@ xpc_discovery(void) | |||
985 | partid_t partid; | 985 | partid_t partid; |
986 | struct xpc_partition *part; | 986 | struct xpc_partition *part; |
987 | u64 *discovered_nasids; | 987 | u64 *discovered_nasids; |
988 | enum xpc_retval ret; | 988 | enum xp_retval ret; |
989 | 989 | ||
990 | remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE + | 990 | remote_rp = xpc_kmalloc_cacheline_aligned(XPC_RP_HEADER_SIZE + |
991 | xp_nasid_mask_bytes, | 991 | xp_nasid_mask_bytes, |
@@ -1063,12 +1063,12 @@ xpc_discovery(void) | |||
1063 | 1063 | ||
1064 | ret = xpc_get_remote_rp(nasid, discovered_nasids, | 1064 | ret = xpc_get_remote_rp(nasid, discovered_nasids, |
1065 | remote_rp, &remote_rp_pa); | 1065 | remote_rp, &remote_rp_pa); |
1066 | if (ret != xpcSuccess) { | 1066 | if (ret != xpSuccess) { |
1067 | dev_dbg(xpc_part, "unable to get reserved page " | 1067 | dev_dbg(xpc_part, "unable to get reserved page " |
1068 | "from nasid %d, reason=%d\n", nasid, | 1068 | "from nasid %d, reason=%d\n", nasid, |
1069 | ret); | 1069 | ret); |
1070 | 1070 | ||
1071 | if (ret == xpcLocalPartid) | 1071 | if (ret == xpLocalPartid) |
1072 | break; | 1072 | break; |
1073 | 1073 | ||
1074 | continue; | 1074 | continue; |
@@ -1082,7 +1082,7 @@ xpc_discovery(void) | |||
1082 | /* pull over the cross partition variables */ | 1082 | /* pull over the cross partition variables */ |
1083 | 1083 | ||
1084 | ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); | 1084 | ret = xpc_get_remote_vars(remote_vars_pa, remote_vars); |
1085 | if (ret != xpcSuccess) { | 1085 | if (ret != xpSuccess) { |
1086 | dev_dbg(xpc_part, "unable to get XPC variables " | 1086 | dev_dbg(xpc_part, "unable to get XPC variables " |
1087 | "from nasid %d, reason=%d\n", nasid, | 1087 | "from nasid %d, reason=%d\n", nasid, |
1088 | ret); | 1088 | ret); |
@@ -1116,7 +1116,7 @@ xpc_discovery(void) | |||
1116 | "register xp_addr region 0x%016lx\n", | 1116 | "register xp_addr region 0x%016lx\n", |
1117 | partid, remote_vars->amos_page_pa); | 1117 | partid, remote_vars->amos_page_pa); |
1118 | 1118 | ||
1119 | XPC_SET_REASON(part, xpcPhysAddrRegFailed, | 1119 | XPC_SET_REASON(part, xpPhysAddrRegFailed, |
1120 | __LINE__); | 1120 | __LINE__); |
1121 | break; | 1121 | break; |
1122 | } | 1122 | } |
@@ -1151,7 +1151,7 @@ xpc_discovery(void) | |||
1151 | * Given a partid, get the nasids owned by that partition from the | 1151 | * Given a partid, get the nasids owned by that partition from the |
1152 | * remote partition's reserved page. | 1152 | * remote partition's reserved page. |
1153 | */ | 1153 | */ |
1154 | enum xpc_retval | 1154 | enum xp_retval |
1155 | xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask) | 1155 | xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask) |
1156 | { | 1156 | { |
1157 | struct xpc_partition *part; | 1157 | struct xpc_partition *part; |
@@ -1160,7 +1160,7 @@ xpc_initiate_partid_to_nasids(partid_t partid, void *nasid_mask) | |||
1160 | 1160 | ||
1161 | part = &xpc_partitions[partid]; | 1161 | part = &xpc_partitions[partid]; |
1162 | if (part->remote_rp_pa == 0) | 1162 | if (part->remote_rp_pa == 0) |
1163 | return xpcPartitionDown; | 1163 | return xpPartitionDown; |
1164 | 1164 | ||
1165 | memset(nasid_mask, 0, XP_NASID_MASK_BYTES); | 1165 | memset(nasid_mask, 0, XP_NASID_MASK_BYTES); |
1166 | 1166 | ||
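In xpc_partition.c the remote reserved-page and variables checks now report their failures with the xp-prefixed codes: a missing reserved-page address, a partid out of range, our own partition, or a major-version mismatch each get a distinct reason. A compressed, first-failure-wins sketch of those checks; check_remote_rp() and its struct are hypothetical stand-ins, and the XP_MAX_PARTITIONS value is assumed for the demo only.

#include <stdio.h>

/* Illustrative subset of the renamed codes used by the checks below. */
enum xp_retval {
	xpSuccess = 0,
	xpNoRsvdPageAddr,
	xpInvalidPartid,
	xpLocalPartid,
	xpBadVersion
};

#define XP_MAX_PARTITIONS	64	/* assumed value, demo only */

struct remote_rp_info {
	unsigned long rp_pa;	/* physical address of the reserved page */
	int partid;		/* partition id advertised by the page */
	int version_major;	/* major part of the page's version */
};

/* Return the first failing check, in the same order as the driver. */
static enum xp_retval
check_remote_rp(const struct remote_rp_info *rp, int local_partid, int expected_major)
{
	if (rp->rp_pa == 0)
		return xpNoRsvdPageAddr;
	if (rp->partid < 1 || rp->partid > XP_MAX_PARTITIONS - 1)
		return xpInvalidPartid;
	if (rp->partid == local_partid)
		return xpLocalPartid;
	if (rp->version_major != expected_major)
		return xpBadVersion;
	return xpSuccess;
}

int main(void)
{
	struct remote_rp_info rp = { 0x1000, 3, 2 };

	printf("%d\n", check_remote_rp(&rp, 1, 2));	/* xpSuccess */
	rp.partid = 1;
	printf("%d\n", check_remote_rp(&rp, 1, 2));	/* xpLocalPartid */
	return 0;
}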
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c index a9543c65814d..38df16650c5c 100644 --- a/drivers/misc/sgi-xp/xpnet.c +++ b/drivers/misc/sgi-xp/xpnet.c | |||
@@ -282,7 +282,7 @@ xpnet_receive(partid_t partid, int channel, struct xpnet_message *msg) | |||
282 | * state or message reception on a connection. | 282 | * state or message reception on a connection. |
283 | */ | 283 | */ |
284 | static void | 284 | static void |
285 | xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel, | 285 | xpnet_connection_activity(enum xp_retval reason, partid_t partid, int channel, |
286 | void *data, void *key) | 286 | void *data, void *key) |
287 | { | 287 | { |
288 | long bp; | 288 | long bp; |
@@ -291,13 +291,13 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel, | |||
291 | DBUG_ON(channel != XPC_NET_CHANNEL); | 291 | DBUG_ON(channel != XPC_NET_CHANNEL); |
292 | 292 | ||
293 | switch (reason) { | 293 | switch (reason) { |
294 | case xpcMsgReceived: /* message received */ | 294 | case xpMsgReceived: /* message received */ |
295 | DBUG_ON(data == NULL); | 295 | DBUG_ON(data == NULL); |
296 | 296 | ||
297 | xpnet_receive(partid, channel, (struct xpnet_message *)data); | 297 | xpnet_receive(partid, channel, (struct xpnet_message *)data); |
298 | break; | 298 | break; |
299 | 299 | ||
300 | case xpcConnected: /* connection completed to a partition */ | 300 | case xpConnected: /* connection completed to a partition */ |
301 | spin_lock_bh(&xpnet_broadcast_lock); | 301 | spin_lock_bh(&xpnet_broadcast_lock); |
302 | xpnet_broadcast_partitions |= 1UL << (partid - 1); | 302 | xpnet_broadcast_partitions |= 1UL << (partid - 1); |
303 | bp = xpnet_broadcast_partitions; | 303 | bp = xpnet_broadcast_partitions; |
@@ -330,7 +330,7 @@ xpnet_connection_activity(enum xpc_retval reason, partid_t partid, int channel, | |||
330 | static int | 330 | static int |
331 | xpnet_dev_open(struct net_device *dev) | 331 | xpnet_dev_open(struct net_device *dev) |
332 | { | 332 | { |
333 | enum xpc_retval ret; | 333 | enum xp_retval ret; |
334 | 334 | ||
335 | dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, " | 335 | dev_dbg(xpnet, "calling xpc_connect(%d, 0x%p, NULL, %ld, %ld, %ld, " |
336 | "%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity, | 336 | "%ld)\n", XPC_NET_CHANNEL, xpnet_connection_activity, |
@@ -340,7 +340,7 @@ xpnet_dev_open(struct net_device *dev) | |||
340 | ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL, | 340 | ret = xpc_connect(XPC_NET_CHANNEL, xpnet_connection_activity, NULL, |
341 | XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, | 341 | XPNET_MSG_SIZE, XPNET_MSG_NENTRIES, |
342 | XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS); | 342 | XPNET_MAX_KTHREADS, XPNET_MAX_IDLE_KTHREADS); |
343 | if (ret != xpcSuccess) { | 343 | if (ret != xpSuccess) { |
344 | dev_err(xpnet, "ifconfig up of %s failed on XPC connect, " | 344 | dev_err(xpnet, "ifconfig up of %s failed on XPC connect, " |
345 | "ret=%d\n", dev->name, ret); | 345 | "ret=%d\n", dev->name, ret); |
346 | 346 | ||
@@ -407,7 +407,7 @@ xpnet_dev_get_stats(struct net_device *dev) | |||
407 | * release the skb and then release our pending message structure. | 407 | * release the skb and then release our pending message structure. |
408 | */ | 408 | */ |
409 | static void | 409 | static void |
410 | xpnet_send_completed(enum xpc_retval reason, partid_t partid, int channel, | 410 | xpnet_send_completed(enum xp_retval reason, partid_t partid, int channel, |
411 | void *__qm) | 411 | void *__qm) |
412 | { | 412 | { |
413 | struct xpnet_pending_msg *queued_msg = (struct xpnet_pending_msg *)__qm; | 413 | struct xpnet_pending_msg *queued_msg = (struct xpnet_pending_msg *)__qm; |
@@ -439,7 +439,7 @@ static int | |||
439 | xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | 439 | xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) |
440 | { | 440 | { |
441 | struct xpnet_pending_msg *queued_msg; | 441 | struct xpnet_pending_msg *queued_msg; |
442 | enum xpc_retval ret; | 442 | enum xp_retval ret; |
443 | struct xpnet_message *msg; | 443 | struct xpnet_message *msg; |
444 | u64 start_addr, end_addr; | 444 | u64 start_addr, end_addr; |
445 | long dp; | 445 | long dp; |
@@ -528,7 +528,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
528 | 528 | ||
529 | ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL, | 529 | ret = xpc_allocate(dest_partid, XPC_NET_CHANNEL, |
530 | XPC_NOWAIT, (void **)&msg); | 530 | XPC_NOWAIT, (void **)&msg); |
531 | if (unlikely(ret != xpcSuccess)) | 531 | if (unlikely(ret != xpSuccess)) |
532 | continue; | 532 | continue; |
533 | 533 | ||
534 | msg->embedded_bytes = embedded_bytes; | 534 | msg->embedded_bytes = embedded_bytes; |
@@ -557,7 +557,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
557 | 557 | ||
558 | ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg, | 558 | ret = xpc_send_notify(dest_partid, XPC_NET_CHANNEL, msg, |
559 | xpnet_send_completed, queued_msg); | 559 | xpnet_send_completed, queued_msg); |
560 | if (unlikely(ret != xpcSuccess)) { | 560 | if (unlikely(ret != xpSuccess)) { |
561 | atomic_dec(&queued_msg->use_count); | 561 | atomic_dec(&queued_msg->use_count); |
562 | continue; | 562 | continue; |
563 | } | 563 | } |
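Finally, xpnet's callout now receives the xp-prefixed reasons: xpMsgReceived hands the incoming message to the receive path, xpConnected sets the partition's bit in the broadcast mask, and other reasons clear it. A userspace sketch of that dispatch in the style of xpnet_connection_activity(); the partid_t typedef, the handlers, and the callback signature are all placeholders for illustration.

#include <stdio.h>

/* Illustrative subset of the renamed codes. */
enum xp_retval { xpSuccess = 0, xpMsgReceived, xpConnected, xpNotConnected };

typedef short partid_t;			/* stand-in for the driver's partid_t */
static unsigned long broadcast_partitions;	/* one bit per connected partition */

/* Placeholder for the receive path. */
static void handle_message(partid_t partid, int channel, void *data)
{
	printf("message from partid %d on channel %d: %s\n",
	       partid, channel, (char *)data);
}

/*
 * Hypothetical connection-activity callout: dispatch on the renamed
 * reason code, as the xpnet callout does after this patch.
 */
static void connection_activity(enum xp_retval reason, partid_t partid,
				int channel, void *data, void *key)
{
	switch (reason) {
	case xpMsgReceived:		/* a message arrived */
		handle_message(partid, channel, data);
		break;
	case xpConnected:		/* channel to this partition opened */
		broadcast_partitions |= 1UL << (partid - 1);
		printf("partid %d connected, mask=0x%lx\n",
		       partid, broadcast_partitions);
		break;
	default:			/* any other reason drops the partition */
		broadcast_partitions &= ~(1UL << (partid - 1));
		printf("partid %d down, reason=%d\n", partid, reason);
		break;
	}
	(void)key;
}

int main(void)
{
	connection_activity(xpConnected, 2, 0, NULL, NULL);
	connection_activity(xpMsgReceived, 2, 0, "hello", NULL);
	connection_activity(xpNotConnected, 2, 0, NULL, NULL);
	return 0;
}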