author		Dean Nelson <dcn@sgi.com>	2008-07-30 01:34:16 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-07-30 12:41:50 -0400
commit		a812dcc3a298eef650c381e094e2cf41a4ecc9ad (patch)
tree		cb6a24a623891054789f5340742c3c8ff1fb8cde /drivers
parent		261f3b4979db88d29fc86aad9f76fbc0c2c6d21a (diff)
sgi-xp: add usage of GRU driver by xpc_remote_memcpy()
Add UV support to xpc_remote_memcpy(), which involves interfacing to the
GRU driver.
Signed-off-by: Dean Nelson <dcn@sgi.com>
Cc: Jack Steiner <steiner@sgi.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
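
The functional core of the change: xp_remote_memcpy() now takes global physical addresses for both ends of the transfer instead of a virtual destination and a physical source, a new xp_pa() hook converts a local virtual address into that form, and on UV the copy is performed by gru_copy_gpa() from the GRU driver (bte_copy() is still used on sn2). A minimal caller-side sketch drawn from the signatures in this patch (the helper name and arguments below are illustrative only, not code added by the patch):

	/* illustrative helper, not part of this patch */
	static enum xp_retval
	pull_remote_data(void *dst, unsigned long src_gpa, size_t len)
	{
		/*
		 * The local buffer is converted with xp_pa(); the remote
		 * side is already a global physical address supplied by
		 * the peer partition.
		 */
		return xp_remote_memcpy(xp_pa(dst), src_gpa, len);
	}

This is the same pattern the patch applies to existing callers such as xpc_get_remote_rp() and xpc_pull_remote_cachelines_sn2().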
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/misc/sgi-xp/xp.h		|  8
-rw-r--r--	drivers/misc/sgi-xp/xp_main.c		|  6
-rw-r--r--	drivers/misc/sgi-xp/xp_sn2.c		| 50
-rw-r--r--	drivers/misc/sgi-xp/xp_uv.c		| 27
-rw-r--r--	drivers/misc/sgi-xp/xpc.h		| 44
-rw-r--r--	drivers/misc/sgi-xp/xpc_channel.c	|  5
-rw-r--r--	drivers/misc/sgi-xp/xpc_main.c		|  8
-rw-r--r--	drivers/misc/sgi-xp/xpc_partition.c	| 37
-rw-r--r--	drivers/misc/sgi-xp/xpc_sn2.c		| 68
-rw-r--r--	drivers/misc/sgi-xp/xpc_uv.c		|  2
-rw-r--r--	drivers/misc/sgi-xp/xpnet.c		| 26
11 files changed, 154 insertions(+), 127 deletions(-)
diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
index 01bf1a2cd8ed..45d0a08c2ddd 100644
--- a/drivers/misc/sgi-xp/xp.h
+++ b/drivers/misc/sgi-xp/xp.h
@@ -207,7 +207,9 @@ enum xp_retval {
 	xpUnsupported,		/* 56: unsupported functionality or resource */
 	xpNeedMoreInfo,		/* 57: more info is needed by SAL */
 
-	xpUnknownReason		/* 58: unknown reason - must be last in enum */
+	xpGruCopyError,		/* 58: gru_copy_gru() returned error */
+
+	xpUnknownReason		/* 59: unknown reason - must be last in enum */
 };
 
 /*
@@ -349,7 +351,9 @@ extern short xp_max_npartitions;
 extern short xp_partition_id;
 extern u8 xp_region_size;
 
-extern enum xp_retval (*xp_remote_memcpy) (void *, const void *, size_t);
+extern unsigned long (*xp_pa) (void *);
+extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long,
+					   size_t);
 extern int (*xp_cpu_to_nasid) (int);
 
 extern u64 xp_nofault_PIOR_target;
diff --git a/drivers/misc/sgi-xp/xp_main.c b/drivers/misc/sgi-xp/xp_main.c
index c34b23fe498f..f86ad3af26b7 100644
--- a/drivers/misc/sgi-xp/xp_main.c
+++ b/drivers/misc/sgi-xp/xp_main.c
@@ -41,7 +41,11 @@ EXPORT_SYMBOL_GPL(xp_partition_id);
 u8 xp_region_size;
 EXPORT_SYMBOL_GPL(xp_region_size);
 
-enum xp_retval (*xp_remote_memcpy) (void *dst, const void *src, size_t len);
+unsigned long (*xp_pa) (void *addr);
+EXPORT_SYMBOL_GPL(xp_pa);
+
+enum xp_retval (*xp_remote_memcpy) (unsigned long dst_gpa,
+				    const unsigned long src_gpa, size_t len);
 EXPORT_SYMBOL_GPL(xp_remote_memcpy);
 
 int (*xp_cpu_to_nasid) (int cpuid);
diff --git a/drivers/misc/sgi-xp/xp_sn2.c b/drivers/misc/sgi-xp/xp_sn2.c
index c6a1ede7d6e6..1440134caf31 100644
--- a/drivers/misc/sgi-xp/xp_sn2.c
+++ b/drivers/misc/sgi-xp/xp_sn2.c
@@ -63,7 +63,7 @@ xp_register_nofault_code_sn2(void)
 	return xpSuccess;
 }
 
-void
+static void
 xp_unregister_nofault_code_sn2(void)
 {
 	u64 func_addr = *(u64 *)xp_nofault_PIOR;
@@ -75,44 +75,41 @@ xp_unregister_nofault_code_sn2(void)
 }
 
 /*
+ * Convert a virtual memory address to a physical memory address.
+ */
+static unsigned long
+xp_pa_sn2(void *addr)
+{
+	return __pa(addr);
+}
+
+/*
  * Wrapper for bte_copy().
  *
- * vdst - virtual address of the destination of the transfer.
- * psrc - physical address of the source of the transfer.
+ * dst_pa - physical address of the destination of the transfer.
+ * src_pa - physical address of the source of the transfer.
  * len - number of bytes to transfer from source to destination.
  *
  * Note: xp_remote_memcpy_sn2() should never be called while holding a spinlock.
  */
 static enum xp_retval
-xp_remote_memcpy_sn2(void *vdst, const void *psrc, size_t len)
+xp_remote_memcpy_sn2(unsigned long dst_pa, const unsigned long src_pa,
+		     size_t len)
 {
 	bte_result_t ret;
-	u64 pdst = ia64_tpa(vdst);
-	/* ??? What are the rules governing the src and dst addresses passed in?
-	 * ??? Currently we're assuming that dst is a virtual address and src
-	 * ??? is a physical address, is this appropriate? Can we allow them to
-	 * ??? be whatever and we make the change here without damaging the
-	 * ??? addresses?
-	 */
 
-	/*
-	 * Ensure that the physically mapped memory is contiguous.
-	 *
-	 * We do this by ensuring that the memory is from region 7 only.
-	 * If the need should arise to use memory from one of the other
-	 * regions, then modify the BUG_ON() statement to ensure that the
-	 * memory from that region is always physically contiguous.
-	 */
-	BUG_ON(REGION_NUMBER(vdst) != RGN_KERNEL);
-
-	ret = bte_copy((u64)psrc, pdst, len, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
+	ret = bte_copy(src_pa, dst_pa, len, (BTE_NOTIFY | BTE_WACQUIRE), NULL);
 	if (ret == BTE_SUCCESS)
 		return xpSuccess;
 
-	if (is_shub2())
-		dev_err(xp, "bte_copy() on shub2 failed, error=0x%x\n", ret);
-	else
-		dev_err(xp, "bte_copy() failed, error=%d\n", ret);
+	if (is_shub2()) {
+		dev_err(xp, "bte_copy() on shub2 failed, error=0x%x dst_pa="
+			"0x%016lx src_pa=0x%016lx len=%ld\n", ret, dst_pa,
+			src_pa, len);
+	} else {
+		dev_err(xp, "bte_copy() failed, error=%d dst_pa=0x%016lx "
+			"src_pa=0x%016lx len=%ld\n", ret, dst_pa, src_pa, len);
+	}
 
 	return xpBteCopyError;
 }
@@ -132,6 +129,7 @@ xp_init_sn2(void)
 	xp_partition_id = sn_partition_id;
 	xp_region_size = sn_region_size;
 
+	xp_pa = xp_pa_sn2;
 	xp_remote_memcpy = xp_remote_memcpy_sn2;
 	xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
 
diff --git a/drivers/misc/sgi-xp/xp_uv.c b/drivers/misc/sgi-xp/xp_uv.c
index 382b1b6bcc0b..44f2c2b58c2f 100644
--- a/drivers/misc/sgi-xp/xp_uv.c
+++ b/drivers/misc/sgi-xp/xp_uv.c
@@ -13,13 +13,33 @@
  *
  */
 
+#include <linux/device.h>
+#include <asm/uv/uv_hub.h>
+#include "../sgi-gru/grukservices.h"
 #include "xp.h"
 
+/*
+ * Convert a virtual memory address to a physical memory address.
+ */
+static unsigned long
+xp_pa_uv(void *addr)
+{
+	return uv_gpa(addr);
+}
+
 static enum xp_retval
-xp_remote_memcpy_uv(void *vdst, const void *psrc, size_t len)
+xp_remote_memcpy_uv(unsigned long dst_gpa, const unsigned long src_gpa,
+		    size_t len)
 {
-	/* !!! this function needs fleshing out */
-	return xpUnsupported;
+	int ret;
+
+	ret = gru_copy_gpa(dst_gpa, src_gpa, len);
+	if (ret == 0)
+		return xpSuccess;
+
+	dev_err(xp, "gru_copy_gpa() failed, dst_gpa=0x%016lx src_gpa=0x%016lx "
+		"len=%ld\n", dst_gpa, src_gpa, len);
+	return xpGruCopyError;
 }
 
 enum xp_retval
@@ -29,6 +49,7 @@ xp_init_uv(void)
 
 	xp_max_npartitions = XP_MAX_NPARTITIONS_UV;
 
+	xp_pa = xp_pa_uv;
 	xp_remote_memcpy = xp_remote_memcpy_uv;
 
 	return xpSuccess;
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 96408fcf5a1e..49e26993345b 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -91,8 +91,8 @@ struct xpc_rsvd_page {
 	u8 version;
 	u8 pad1[3];		/* align to next u64 in 1st 64-byte cacheline */
 	union {
-		u64 vars_pa;	/* physical address of struct xpc_vars */
-		u64 activate_mq_gpa;	/* global phys address of activate_mq */
+		unsigned long vars_pa;	/* phys address of struct xpc_vars */
+		unsigned long activate_mq_gpa; /* gru phy addr of activate_mq */
 	} sn;
 	unsigned long ts_jiffies; /* timestamp when rsvd pg was setup by XPC */
 	u64 pad2[10];		/* align to last u64 in 2nd 64-byte cacheline */
@@ -122,8 +122,8 @@ struct xpc_vars_sn2 {
 	u64 heartbeat_offline;	/* if 0, heartbeat should be changing */
 	int activate_IRQ_nasid;
 	int activate_IRQ_phys_cpuid;
-	u64 vars_part_pa;
-	u64 amos_page_pa;	/* paddr of page of amos from MSPEC driver */
+	unsigned long vars_part_pa;
+	unsigned long amos_page_pa;/* paddr of page of amos from MSPEC driver */
 	struct amo *amos_page;	/* vaddr of page of amos from MSPEC driver */
 };
 
@@ -142,10 +142,10 @@ struct xpc_vars_sn2 {
 struct xpc_vars_part_sn2 {
 	u64 magic;
 
-	u64 openclose_args_pa;	/* physical address of open and close args */
-	u64 GPs_pa;		/* physical address of Get/Put values */
+	unsigned long openclose_args_pa; /* phys addr of open and close args */
+	unsigned long GPs_pa;	/* physical address of Get/Put values */
 
-	u64 chctl_amo_pa;	/* physical address of chctl flags' amo */
+	unsigned long chctl_amo_pa; /* physical address of chctl flags' amo */
 
 	int notify_IRQ_nasid;	/* nasid of where to send notify IRQs */
 	int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */
@@ -213,7 +213,7 @@ struct xpc_openclose_args {
 	u16 msg_size;		/* sizeof each message entry */
 	u16 remote_nentries;	/* #of message entries in remote msg queue */
 	u16 local_nentries;	/* #of message entries in local msg queue */
-	u64 local_msgqueue_pa;	/* physical address of local message queue */
+	unsigned long local_msgqueue_pa; /* phys addr of local message queue */
 };
 
 #define XPC_OPENCLOSE_ARGS_SIZE \
@@ -366,8 +366,8 @@ struct xpc_channel {
 	void *remote_msgqueue_base;	/* base address of kmalloc'd space */
 	struct xpc_msg *remote_msgqueue; /* cached copy of remote partition's */
 					 /* local message queue */
-	u64 remote_msgqueue_pa;	/* phys addr of remote partition's */
-				/* local message queue */
+	unsigned long remote_msgqueue_pa; /* phys addr of remote partition's */
+					  /* local message queue */
 
 	atomic_t references;	/* #of external references to queues */
 
@@ -491,12 +491,12 @@ xpc_any_msg_chctl_flags_set(union xpc_channel_ctl_flags *chctl)
  */
 
 struct xpc_partition_sn2 {
-	u64 remote_amos_page_pa; /* phys addr of partition's amos page */
+	unsigned long remote_amos_page_pa; /* paddr of partition's amos page */
 	int activate_IRQ_nasid;	/* active partition's act/deact nasid */
 	int activate_IRQ_phys_cpuid;	/* active part's act/deact phys cpuid */
 
-	u64 remote_vars_pa;	/* phys addr of partition's vars */
-	u64 remote_vars_part_pa; /* phys addr of partition's vars part */
+	unsigned long remote_vars_pa;	/* phys addr of partition's vars */
+	unsigned long remote_vars_part_pa; /* paddr of partition's vars part */
 	u8 remote_vars_version;	/* version# of partition's vars */
 
 	void *local_GPs_base;	/* base address of kmalloc'd space */
@@ -504,10 +504,10 @@ struct xpc_partition_sn2 {
 	void *remote_GPs_base;	/* base address of kmalloc'd space */
 	struct xpc_gp_sn2 *remote_GPs;	/* copy of remote partition's local */
 					/* Get/Put values */
-	u64 remote_GPs_pa;	/* phys address of remote partition's local */
-				/* Get/Put values */
+	unsigned long remote_GPs_pa; /* phys addr of remote partition's local */
+				     /* Get/Put values */
 
-	u64 remote_openclose_args_pa;	/* phys addr of remote's args */
+	unsigned long remote_openclose_args_pa; /* phys addr of remote's args */
 
 	int notify_IRQ_nasid;	/* nasid of where to send notify IRQs */
 	int notify_IRQ_phys_cpuid; /* CPUID of where to send notify IRQs */
@@ -529,7 +529,7 @@ struct xpc_partition {
 
 	u8 remote_rp_version;	/* version# of partition's rsvd pg */
 	unsigned long remote_rp_ts_jiffies; /* timestamp when rsvd pg setup */
-	u64 remote_rp_pa;	/* phys addr of partition's rsvd pg */
+	unsigned long remote_rp_pa;	/* phys addr of partition's rsvd pg */
 	u64 last_heartbeat;	/* HB at last read */
 	u32 activate_IRQ_rcvd;	/* IRQs since activation */
 	spinlock_t act_lock;	/* protect updating of act_state */
@@ -623,7 +623,8 @@ extern void xpc_activate_partition(struct xpc_partition *);
 extern void xpc_activate_kthreads(struct xpc_channel *, int);
 extern void xpc_create_kthreads(struct xpc_channel *, int, int);
 extern void xpc_disconnect_wait(int);
-extern enum xp_retval (*xpc_get_partition_rsvd_page_pa) (u64, u64 *, u64 *,
+extern enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *, u64 *,
+							 unsigned long *,
 							 size_t *);
 extern enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *);
 extern void (*xpc_heartbeat_init) (void);
@@ -640,8 +641,8 @@ extern void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *);
 extern void (*xpc_process_msg_chctl_flags) (struct xpc_partition *, int);
 extern int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *);
 extern struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *);
-extern void (*xpc_request_partition_activation) (struct xpc_rsvd_page *, u64,
-						 int);
+extern void (*xpc_request_partition_activation) (struct xpc_rsvd_page *,
+						 unsigned long, int);
 extern void (*xpc_request_partition_reactivation) (struct xpc_partition *);
 extern void (*xpc_request_partition_deactivation) (struct xpc_partition *);
 extern void (*xpc_cancel_partition_deactivation_request) (
@@ -690,7 +691,8 @@ extern enum xp_retval xpc_mark_partition_active(struct xpc_partition *);
 extern void xpc_mark_partition_inactive(struct xpc_partition *);
 extern void xpc_discovery(void);
 extern enum xp_retval xpc_get_remote_rp(int, unsigned long *,
-					struct xpc_rsvd_page *, u64 *);
+					struct xpc_rsvd_page *,
+					unsigned long *);
 extern void xpc_deactivate_partition(const int, struct xpc_partition *,
 				     enum xp_retval);
 extern enum xp_retval xpc_initiate_partid_to_nasids(short, void *);
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 0615efbe0070..d7a15f1a78a5 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -366,9 +366,8 @@ again:
 		dev_dbg(xpc_chan, "XPC_CHCTL_OPENREPLY (local_msgqueue_pa="
 			"0x%lx, local_nentries=%d, remote_nentries=%d) "
 			"received from partid=%d, channel=%d\n",
-			(unsigned long)args->local_msgqueue_pa,
-			args->local_nentries, args->remote_nentries,
-			ch->partid, ch->number);
+			args->local_msgqueue_pa, args->local_nentries,
+			args->remote_nentries, ch->partid, ch->number);
 
 		if (ch->flags & (XPC_C_DISCONNECTING | XPC_C_DISCONNECTED)) {
 			spin_unlock_irqrestore(&ch->lock, irq_flags);
diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
index f7478cc3572d..dc686110aef7 100644
--- a/drivers/misc/sgi-xp/xpc_main.c
+++ b/drivers/misc/sgi-xp/xpc_main.c
@@ -169,8 +169,9 @@ static struct notifier_block xpc_die_notifier = {
 	.notifier_call = xpc_system_die,
 };
 
-enum xp_retval (*xpc_get_partition_rsvd_page_pa) (u64 buf, u64 *cookie,
-						  u64 *paddr, size_t *len);
+enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *buf, u64 *cookie,
+						  unsigned long *rp_pa,
+						  size_t *len);
 enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *rp);
 void (*xpc_heartbeat_init) (void);
 void (*xpc_heartbeat_exit) (void);
@@ -189,7 +190,8 @@ int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *ch);
 struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch);
 
 void (*xpc_request_partition_activation) (struct xpc_rsvd_page *remote_rp,
-					  u64 remote_rp_pa, int nasid);
+					  unsigned long remote_rp_pa,
+					  int nasid);
 void (*xpc_request_partition_reactivation) (struct xpc_partition *part);
 void (*xpc_request_partition_deactivation) (struct xpc_partition *part);
 void (*xpc_cancel_partition_deactivation_request) (struct xpc_partition *part);
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index f84d66410205..f150dbfcfcc7 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -60,15 +60,15 @@ xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
  * Given a nasid, get the physical address of the partition's reserved page
  * for that nasid. This function returns 0 on any error.
  */
-static u64
+static unsigned long
 xpc_get_rsvd_page_pa(int nasid)
 {
 	enum xp_retval ret;
 	u64 cookie = 0;
-	u64 rp_pa = nasid;	/* seed with nasid */
+	unsigned long rp_pa = nasid;	/* seed with nasid */
 	size_t len = 0;
-	u64 buf = buf;
-	u64 buf_len = 0;
+	size_t buf_len = 0;
+	void *buf = buf;
 	void *buf_base = NULL;
 
 	while (1) {
@@ -78,7 +78,7 @@ xpc_get_rsvd_page_pa(int nasid)
 
 		dev_dbg(xpc_part, "SAL returned with ret=%d, cookie=0x%016lx, "
 			"address=0x%016lx, len=0x%016lx\n", ret,
-			(unsigned long)cookie, (unsigned long)rp_pa, len);
+			(unsigned long)cookie, rp_pa, len);
 
 		if (ret != xpNeedMoreInfo)
 			break;
@@ -87,19 +87,17 @@ xpc_get_rsvd_page_pa(int nasid)
 		if (L1_CACHE_ALIGN(len) > buf_len) {
 			kfree(buf_base);
 			buf_len = L1_CACHE_ALIGN(len);
-			buf = (u64)xpc_kmalloc_cacheline_aligned(buf_len,
-								 GFP_KERNEL,
-								 &buf_base);
+			buf = xpc_kmalloc_cacheline_aligned(buf_len, GFP_KERNEL,
+							    &buf_base);
 			if (buf_base == NULL) {
 				dev_err(xpc_part, "unable to kmalloc "
-					"len=0x%016lx\n",
-					(unsigned long)buf_len);
+					"len=0x%016lx\n", buf_len);
 				ret = xpNoMemory;
 				break;
 			}
 		}
 
-		ret = xp_remote_memcpy((void *)buf, (void *)rp_pa, buf_len);
+		ret = xp_remote_memcpy(xp_pa(buf), rp_pa, buf_len);
 		if (ret != xpSuccess) {
 			dev_dbg(xpc_part, "xp_remote_memcpy failed %d\n", ret);
 			break;
@@ -111,8 +109,7 @@ xpc_get_rsvd_page_pa(int nasid)
 	if (ret != xpSuccess)
 		rp_pa = 0;
 
-	dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n",
-		(unsigned long)rp_pa);
+	dev_dbg(xpc_part, "reserved page at phys address 0x%016lx\n", rp_pa);
 	return rp_pa;
 }
 
@@ -125,7 +122,7 @@ struct xpc_rsvd_page *
 xpc_setup_rsvd_page(void)
 {
 	struct xpc_rsvd_page *rp;
-	u64 rp_pa;
+	unsigned long rp_pa;
 	unsigned long new_ts_jiffies;
 
 	/* get the local reserved page's address */
@@ -193,7 +190,7 @@ xpc_setup_rsvd_page(void)
  */
 enum xp_retval
 xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids,
-		  struct xpc_rsvd_page *remote_rp, u64 *remote_rp_pa)
+		  struct xpc_rsvd_page *remote_rp, unsigned long *remote_rp_pa)
 {
 	int l;
 	enum xp_retval ret;
@@ -205,7 +202,7 @@ xpc_get_remote_rp(int nasid, unsigned long *discovered_nasids,
 		return xpNoRsvdPageAddr;
 
 	/* pull over the reserved page header and part_nasids mask */
-	ret = xp_remote_memcpy(remote_rp, (void *)*remote_rp_pa,
+	ret = xp_remote_memcpy(xp_pa(remote_rp), *remote_rp_pa,
 			       XPC_RP_HEADER_SIZE + xpc_nasid_mask_nbytes);
 	if (ret != xpSuccess)
 		return ret;
@@ -389,7 +386,7 @@ xpc_discovery(void)
 {
 	void *remote_rp_base;
 	struct xpc_rsvd_page *remote_rp;
-	u64 remote_rp_pa;
+	unsigned long remote_rp_pa;
 	int region;
 	int region_size;
 	int max_regions;
@@ -500,7 +497,7 @@ enum xp_retval
 xpc_initiate_partid_to_nasids(short partid, void *nasid_mask)
 {
 	struct xpc_partition *part;
-	u64 part_nasid_pa;
+	unsigned long part_nasid_pa;
 
 	part = &xpc_partitions[partid];
 	if (part->remote_rp_pa == 0)
@@ -508,8 +505,8 @@ xpc_initiate_partid_to_nasids(short partid, void *nasid_mask)
 
 	memset(nasid_mask, 0, xpc_nasid_mask_nbytes);
 
-	part_nasid_pa = (u64)XPC_RP_PART_NASIDS(part->remote_rp_pa);
+	part_nasid_pa = (unsigned long)XPC_RP_PART_NASIDS(part->remote_rp_pa);
 
-	return xp_remote_memcpy(nasid_mask, (void *)part_nasid_pa,
+	return xp_remote_memcpy(xp_pa(nasid_mask), part_nasid_pa,
 				xpc_nasid_mask_nbytes);
 }
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index fde870aebcb9..1571a7cdf9d0 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -207,8 +207,8 @@ xpc_handle_activate_IRQ_sn2(int irq, void *dev_id)
  * Flag the appropriate amo variable and send an IRQ to the specified node.
  */
 static void
-xpc_send_activate_IRQ_sn2(u64 amos_page_pa, int from_nasid, int to_nasid,
-			  int to_phys_cpuid)
+xpc_send_activate_IRQ_sn2(unsigned long amos_page_pa, int from_nasid,
+			  int to_nasid, int to_phys_cpuid)
 {
 	struct amo *amos = (struct amo *)__va(amos_page_pa +
 					      (XPC_ACTIVATE_IRQ_AMOS_SN2 *
@@ -404,7 +404,7 @@ xpc_send_chctl_openreply_sn2(struct xpc_channel *ch, unsigned long *irq_flags)
 
 	args->remote_nentries = ch->remote_nentries;
 	args->local_nentries = ch->local_nentries;
-	args->local_msgqueue_pa = __pa(ch->local_msgqueue);
+	args->local_msgqueue_pa = xp_pa(ch->local_msgqueue);
 	XPC_SEND_NOTIFY_IRQ_SN2(ch, XPC_CHCTL_OPENREPLY, irq_flags);
 }
 
@@ -577,13 +577,13 @@ xpc_allow_amo_ops_shub_wars_1_1_sn2(void)
 }
 
 static enum xp_retval
-xpc_get_partition_rsvd_page_pa_sn2(u64 buf, u64 *cookie, u64 *paddr,
+xpc_get_partition_rsvd_page_pa_sn2(void *buf, u64 *cookie, unsigned long *rp_pa,
 				   size_t *len)
 {
 	s64 status;
 	enum xp_retval ret;
 
-	status = sn_partition_reserved_page_pa(buf, cookie, paddr, len);
+	status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
 	if (status == SALRET_OK)
 		ret = xpSuccess;
 	else if (status == SALRET_MORE_PASSES)
@@ -604,7 +604,7 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
 
 	xpc_vars_sn2 = XPC_RP_VARS(rp);
 
-	rp->sn.vars_pa = __pa(xpc_vars_sn2);
+	rp->sn.vars_pa = xp_pa(xpc_vars_sn2);
 
 	/* vars_part array follows immediately after vars */
 	xpc_vars_part_sn2 = (struct xpc_vars_part_sn2 *)((u8 *)XPC_RP_VARS(rp) +
@@ -649,7 +649,7 @@ xpc_rsvd_page_init_sn2(struct xpc_rsvd_page *rp)
 	xpc_vars_sn2->version = XPC_V_VERSION;
 	xpc_vars_sn2->activate_IRQ_nasid = cpuid_to_nasid(0);
 	xpc_vars_sn2->activate_IRQ_phys_cpuid = cpu_physical_id(0);
-	xpc_vars_sn2->vars_part_pa = __pa(xpc_vars_part_sn2);
+	xpc_vars_sn2->vars_part_pa = xp_pa(xpc_vars_part_sn2);
 	xpc_vars_sn2->amos_page_pa = ia64_tpa((u64)amos_page);
 	xpc_vars_sn2->amos_page = amos_page;	/* save for next load of XPC */
 
@@ -734,8 +734,8 @@ xpc_check_remote_hb_sn2(void)
 		}
 
 		/* pull the remote_hb cache line */
-		ret = xp_remote_memcpy(remote_vars,
-				       (void *)part->sn.sn2.remote_vars_pa,
+		ret = xp_remote_memcpy(xp_pa(remote_vars),
+				       part->sn.sn2.remote_vars_pa,
 				       XPC_RP_VARS_SIZE);
 		if (ret != xpSuccess) {
 			XPC_DEACTIVATE_PARTITION(part, ret);
@@ -768,7 +768,8 @@ xpc_check_remote_hb_sn2(void)
  * assumed to be of size XPC_RP_VARS_SIZE.
  */
 static enum xp_retval
-xpc_get_remote_vars_sn2(u64 remote_vars_pa, struct xpc_vars_sn2 *remote_vars)
+xpc_get_remote_vars_sn2(unsigned long remote_vars_pa,
+			struct xpc_vars_sn2 *remote_vars)
 {
 	enum xp_retval ret;
 
@@ -776,7 +777,7 @@ xpc_get_remote_vars_sn2(u64 remote_vars_pa, struct xpc_vars_sn2 *remote_vars)
 		return xpVarsNotSet;
 
 	/* pull over the cross partition variables */
-	ret = xp_remote_memcpy(remote_vars, (void *)remote_vars_pa,
+	ret = xp_remote_memcpy(xp_pa(remote_vars), remote_vars_pa,
 			       XPC_RP_VARS_SIZE);
 	if (ret != xpSuccess)
 		return ret;
@@ -791,7 +792,7 @@ xpc_get_remote_vars_sn2(u64 remote_vars_pa, struct xpc_vars_sn2 *remote_vars)
 
 static void
 xpc_request_partition_activation_sn2(struct xpc_rsvd_page *remote_rp,
-				     u64 remote_rp_pa, int nasid)
+				     unsigned long remote_rp_pa, int nasid)
 {
 	xpc_send_local_activate_IRQ_sn2(nasid);
 }
@@ -883,7 +884,8 @@ xpc_partition_deactivation_requested_sn2(short partid)
 static void
 xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
 			      unsigned long *remote_rp_ts_jiffies,
-			      u64 remote_rp_pa, u64 remote_vars_pa,
+			      unsigned long remote_rp_pa,
+			      unsigned long remote_vars_pa,
 			      struct xpc_vars_sn2 *remote_vars)
 {
 	struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2;
@@ -948,8 +950,8 @@ xpc_identify_activate_IRQ_req_sn2(int nasid)
 {
 	struct xpc_rsvd_page *remote_rp;
 	struct xpc_vars_sn2 *remote_vars;
-	u64 remote_rp_pa;
-	u64 remote_vars_pa;
+	unsigned long remote_rp_pa;
+	unsigned long remote_vars_pa;
 	int remote_rp_version;
 	int reactivate = 0;
 	unsigned long remote_rp_ts_jiffies = 0;
@@ -1291,11 +1293,11 @@ xpc_setup_infrastructure_sn2(struct xpc_partition *part)
 	 * The setting of the magic # indicates that these per partition
 	 * specific variables are ready to be used.
	 */
-	xpc_vars_part_sn2[partid].GPs_pa = __pa(part_sn2->local_GPs);
+	xpc_vars_part_sn2[partid].GPs_pa = xp_pa(part_sn2->local_GPs);
 	xpc_vars_part_sn2[partid].openclose_args_pa =
-	    __pa(part->local_openclose_args);
+	    xp_pa(part->local_openclose_args);
 	xpc_vars_part_sn2[partid].chctl_amo_pa =
-	    __pa(part_sn2->local_chctl_amo_va);
+	    xp_pa(part_sn2->local_chctl_amo_va);
 	cpuid = raw_smp_processor_id();	/* any CPU in this partition will do */
 	xpc_vars_part_sn2[partid].notify_IRQ_nasid = cpuid_to_nasid(cpuid);
 	xpc_vars_part_sn2[partid].notify_IRQ_phys_cpuid =
@@ -1382,25 +1384,25 @@ xpc_teardown_infrastructure_sn2(struct xpc_partition *part)
  * Create a wrapper that hides the underlying mechanism for pulling a cacheline
  * (or multiple cachelines) from a remote partition.
  *
- * src must be a cacheline aligned physical address on the remote partition.
+ * src_pa must be a cacheline aligned physical address on the remote partition.
  * dst must be a cacheline aligned virtual address on this partition.
  * cnt must be cacheline sized
  */
 /* ??? Replace this function by call to xp_remote_memcpy() or bte_copy()? */
 static enum xp_retval
 xpc_pull_remote_cachelines_sn2(struct xpc_partition *part, void *dst,
-			       const void *src, size_t cnt)
+			       const unsigned long src_pa, size_t cnt)
 {
 	enum xp_retval ret;
 
-	DBUG_ON((u64)src != L1_CACHE_ALIGN((u64)src));
-	DBUG_ON((u64)dst != L1_CACHE_ALIGN((u64)dst));
+	DBUG_ON(src_pa != L1_CACHE_ALIGN(src_pa));
+	DBUG_ON((unsigned long)dst != L1_CACHE_ALIGN((unsigned long)dst));
 	DBUG_ON(cnt != L1_CACHE_ALIGN(cnt));
 
 	if (part->act_state == XPC_P_DEACTIVATING)
 		return part->reason;
 
-	ret = xp_remote_memcpy(dst, src, cnt);
+	ret = xp_remote_memcpy(xp_pa(dst), src_pa, cnt);
 	if (ret != xpSuccess) {
 		dev_dbg(xpc_chan, "xp_remote_memcpy() from partition %d failed,"
 			" ret=%d\n", XPC_PARTID(part), ret);
@@ -1420,7 +1422,8 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
 	struct xpc_vars_part_sn2 *pulled_entry_cacheline =
 	    (struct xpc_vars_part_sn2 *)L1_CACHE_ALIGN((u64)buffer);
 	struct xpc_vars_part_sn2 *pulled_entry;
-	u64 remote_entry_cacheline_pa, remote_entry_pa;
+	unsigned long remote_entry_cacheline_pa;
+	unsigned long remote_entry_pa;
 	short partid = XPC_PARTID(part);
 	enum xp_retval ret;
 
@@ -1440,7 +1443,7 @@ xpc_pull_remote_vars_part_sn2(struct xpc_partition *part)
 					       (L1_CACHE_BYTES - 1)));
 
 	ret = xpc_pull_remote_cachelines_sn2(part, pulled_entry_cacheline,
-					     (void *)remote_entry_cacheline_pa,
+					     remote_entry_cacheline_pa,
 					     L1_CACHE_BYTES);
 	if (ret != xpSuccess) {
 		dev_dbg(xpc_chan, "failed to pull XPC vars_part from "
@@ -1587,7 +1590,7 @@ xpc_get_chctl_all_flags_sn2(struct xpc_partition *part)
 	if (xpc_any_openclose_chctl_flags_set(&chctl)) {
 		ret = xpc_pull_remote_cachelines_sn2(part, part->
 						     remote_openclose_args,
-						     (void *)part_sn2->
+						     part_sn2->
 						     remote_openclose_args_pa,
 						     XPC_OPENCLOSE_ARGS_SIZE);
 		if (ret != xpSuccess) {
@@ -1604,7 +1607,7 @@ xpc_get_chctl_all_flags_sn2(struct xpc_partition *part)
 
 	if (xpc_any_msg_chctl_flags_set(&chctl)) {
 		ret = xpc_pull_remote_cachelines_sn2(part, part_sn2->remote_GPs,
-						     (void *)part_sn2->remote_GPs_pa,
+						     part_sn2->remote_GPs_pa,
 						     XPC_GP_SIZE);
 		if (ret != xpSuccess) {
 			XPC_DEACTIVATE_PARTITION(part, ret);
@@ -1971,8 +1974,10 @@ xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
 {
 	struct xpc_partition *part = &xpc_partitions[ch->partid];
 	struct xpc_channel_sn2 *ch_sn2 = &ch->sn.sn2;
-	struct xpc_msg *remote_msg, *msg;
-	u32 msg_index, nmsgs;
+	unsigned long remote_msg_pa;
+	struct xpc_msg *msg;
+	u32 msg_index;
+	u32 nmsgs;
 	u64 msg_offset;
 	enum xp_retval ret;
 
@@ -1996,10 +2001,9 @@ xpc_pull_remote_msg_sn2(struct xpc_channel *ch, s64 get)
 
 		msg_offset = msg_index * ch->msg_size;
 		msg = (struct xpc_msg *)((u64)ch->remote_msgqueue + msg_offset);
-		remote_msg = (struct xpc_msg *)(ch->remote_msgqueue_pa +
-						msg_offset);
+		remote_msg_pa = ch->remote_msgqueue_pa + msg_offset;
 
-		ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg,
+		ret = xpc_pull_remote_cachelines_sn2(part, msg, remote_msg_pa,
 						     nmsgs * ch->msg_size);
 		if (ret != xpSuccess) {
 
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 232867aa6929..c2d4ddd6e955 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -61,7 +61,7 @@ xpc_heartbeat_exit_uv(void)
 
 static void
 xpc_request_partition_activation_uv(struct xpc_rsvd_page *remote_rp,
-				    u64 remote_rp_pa, int nasid)
+				    unsigned long remote_rp_pa, int nasid)
 {
 	short partid = remote_rp->SAL_partid;
 	struct xpc_partition *part = &xpc_partitions[partid];
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 49385f441705..4f5d62230116 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -44,7 +44,7 @@ struct xpnet_message {
 	u16 version;		/* Version for this message */
 	u16 embedded_bytes;	/* #of bytes embedded in XPC message */
 	u32 magic;		/* Special number indicating this is xpnet */
-	u64 buf_pa;		/* phys address of buffer to retrieve */
+	unsigned long buf_pa;	/* phys address of buffer to retrieve */
 	u32 size;		/* #of bytes in buffer */
 	u8 leadin_ignore;	/* #of bytes to ignore at the beginning */
 	u8 tailout_ignore;	/* #of bytes to ignore at the end */
@@ -152,6 +152,7 @@ static void
 xpnet_receive(short partid, int channel, struct xpnet_message *msg)
 {
 	struct sk_buff *skb;
+	void *dst;
 	enum xp_retval ret;
 	struct xpnet_dev_private *priv =
 	    (struct xpnet_dev_private *)xpnet_device->priv;
@@ -166,9 +167,8 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
 
 		return;
 	}
-	dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n",
-		(unsigned long)msg->buf_pa, msg->size, msg->leadin_ignore,
-		msg->tailout_ignore);
+	dev_dbg(xpnet, "received 0x%lx, %d, %d, %d\n", msg->buf_pa, msg->size,
+		msg->leadin_ignore, msg->tailout_ignore);
 
 	/* reserve an extra cache line */
 	skb = dev_alloc_skb(msg->size + L1_CACHE_BYTES);
@@ -210,15 +210,12 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
 		skb_copy_to_linear_data(skb, &msg->data,
 					(size_t)msg->embedded_bytes);
 	} else {
+		dst = (void *)((u64)skb->data & ~(L1_CACHE_BYTES - 1));
 		dev_dbg(xpnet, "transferring buffer to the skb->data area;\n\t"
-			"xp_remote_memcpy(0x%p, 0x%p, %hu)\n", (void *)
-			((u64)skb->data & ~(L1_CACHE_BYTES - 1)),
+			"xp_remote_memcpy(0x%p, 0x%p, %hu)\n", dst,
 			(void *)msg->buf_pa, msg->size);
 
-		ret = xp_remote_memcpy((void *)((u64)skb->data &
-						~(L1_CACHE_BYTES - 1)),
-				       (void *)msg->buf_pa, msg->size);
-
+		ret = xp_remote_memcpy(xp_pa(dst), msg->buf_pa, msg->size);
 		if (ret != xpSuccess) {
 			/*
 			 * !!! Need better way of cleaning skb. Currently skb
@@ -226,8 +223,7 @@ xpnet_receive(short partid, int channel, struct xpnet_message *msg)
 			 * !!! dev_kfree_skb.
 			 */
 			dev_err(xpnet, "xp_remote_memcpy(0x%p, 0x%p, 0x%hx) "
-				"returned error=0x%x\n", (void *)
-				((u64)skb->data & ~(L1_CACHE_BYTES - 1)),
+				"returned error=0x%x\n", dst,
 				(void *)msg->buf_pa, msg->size, ret);
 
 			xpc_received(partid, channel, (void *)msg);
@@ -428,13 +424,13 @@ xpnet_send(struct sk_buff *skb, struct xpnet_pending_msg *queued_msg,
 	msg->size = end_addr - start_addr;
 	msg->leadin_ignore = (u64)skb->data - start_addr;
 	msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb);
-	msg->buf_pa = __pa(start_addr);
+	msg->buf_pa = xp_pa((void *)start_addr);
 
 	dev_dbg(xpnet, "sending XPC message to %d:%d\n"
 		KERN_DEBUG "msg->buf_pa=0x%lx, msg->size=%u, "
 		"msg->leadin_ignore=%u, msg->tailout_ignore=%u\n",
-		dest_partid, XPC_NET_CHANNEL, (unsigned long)msg->buf_pa,
-		msg->size, msg->leadin_ignore, msg->tailout_ignore);
+		dest_partid, XPC_NET_CHANNEL, msg->buf_pa, msg->size,
+		msg->leadin_ignore, msg->tailout_ignore);
 
 	atomic_inc(&queued_msg->use_count);
 