author		Dean Nelson <dcn@sgi.com>	2008-11-05 18:28:00 -0500
committer	H. Peter Anvin <hpa@zytor.com>	2008-11-05 23:32:20 -0500
commit		2525789b4694d78df4f001063f042b2b74227d26 (patch)
tree		d1152fdf85ac13770c21ec25e2dca520a7a56af4 /drivers/misc/sgi-xp
parent		6c1c325de908cbc444cf284f59c3a892161012e9 (diff)
sgi-xp: create activate and notify gru message queues
For UV add the code to create the activate and notify gru message queues.

Signed-off-by: Dean Nelson <dcn@sgi.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'drivers/misc/sgi-xp')
-rw-r--r--	drivers/misc/sgi-xp/xpc.h	 12
-rw-r--r--	drivers/misc/sgi-xp/xpc_uv.c	259
2 files changed, 218 insertions, 53 deletions
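
In short: the queues were previously bare page allocations with a stubbed-out irq path; this patch wraps each queue in a struct xpc_gru_mq_uv descriptor and fills in the missing setup (BIOS watchlist allocation, per-architecture irq wiring, and cross-partition memory protection). A minimal sketch of the resulting call contract, distilled from the hunks below (illustration only, not part of the patch; it assumes the declarations this diff introduces):

#include <linux/err.h>

/*
 * Simplified view of the new interface. The real create path also sets
 * up the GRU watchlist, the irq/MMR wiring, and xp memory protection.
 */
static int example_bring_up_queues(void)
{
	struct xpc_gru_mq_uv *amq, *nmq;

	amq = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0 /* cpu */,
				   XPC_ACTIVATE_IRQ_NAME,
				   xpc_handle_activate_IRQ_uv);
	if (IS_ERR(amq))
		return PTR_ERR(amq);	/* errno now travels in the pointer */

	nmq = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0 /* cpu */,
				   XPC_NOTIFY_IRQ_NAME,
				   xpc_handle_notify_IRQ_uv);
	if (IS_ERR(nmq)) {
		xpc_destroy_gru_mq_uv(amq);	/* full reverse teardown */
		return PTR_ERR(nmq);
	}
	return 0;
}
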
diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
index 619208d61862..a5bd658c2e83 100644
--- a/drivers/misc/sgi-xp/xpc.h
+++ b/drivers/misc/sgi-xp/xpc.h
@@ -181,6 +181,18 @@ struct xpc_vars_part_sn2 {
 					 xpc_nasid_mask_nlongs))
 
 /*
+ * Info pertinent to a GRU message queue using a watch list for irq generation.
+ */
+struct xpc_gru_mq_uv {
+	void *address;		/* address of GRU message queue */
+	unsigned int order;	/* size of GRU message queue as a power of 2 */
+	int irq;		/* irq raised when message is received in mq */
+	int mmr_blade;		/* blade where watchlist was allocated from */
+	unsigned long mmr_offset; /* offset of irq mmr located on mmr_blade */
+	int watchlist_num;	/* number of watchlist allocated by BIOS */
+};
+
+/*
  * The activate_mq is used to send/receive GRU messages that affect XPC's
  * heartbeat, partition active state, and channel state. This is UV only.
  */
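
A note on the order field above: it records the total queue size as a power of 2 in bytes, while the page allocator in xpc_uv.c works in page-sized powers of 2. A small worked illustration of the conversion in both directions (a sketch, assuming get_order() and PAGE_SHIFT as provided by the kernel headers):

#include <linux/mm.h>	/* get_order(), PAGE_SHIFT */

/*
 * Illustration only: round a requested byte count the way
 * xpc_create_gru_mq_uv() does, then recover the page order the way
 * xpc_destroy_gru_mq_uv() does.
 */
static unsigned long example_mq_size_round_trip(unsigned int mq_size)
{
	int pg_order = get_order(mq_size);	    /* power of 2, in pages */
	unsigned int order = pg_order + PAGE_SHIFT; /* power of 2, in bytes */
	unsigned long real_size = 1UL << order;	    /* bytes actually backed */
	int pg_order_again = order - PAGE_SHIFT;    /* what free_pages() needs */

	(void)pg_order_again;	/* same value we started from */
	return real_size;
}

For instance, assuming XP_MAX_NPARTITIONS_UV is 256 and GRU_CACHE_LINE_BYTES is 64, XPC_ACTIVATE_MQ_SIZE_UV is 4 * 256 * 64 = 64 KiB; with 4 KiB pages that gives pg_order = 4 and order = 16, so 1UL << 16 reproduces the 64 KiB.
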
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index 1ac694c01623..ec526c7ff70c 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -18,7 +18,15 @@
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/device.h>
+#include <linux/err.h>
 #include <asm/uv/uv_hub.h>
+#if defined CONFIG_X86_64
+#include <asm/uv/bios.h>
+#include <asm/uv/uv_irq.h>
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+#include <asm/sn/intr.h>
+#include <asm/sn/sn_sal.h>
+#endif
 #include "../sgi-gru/gru.h"
 #include "../sgi-gru/grukservices.h"
 #include "xpc.h"
@@ -27,15 +35,17 @@ static atomic64_t xpc_heartbeat_uv;
 static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
 
 #define XPC_ACTIVATE_MSG_SIZE_UV	(1 * GRU_CACHE_LINE_BYTES)
-#define XPC_NOTIFY_MSG_SIZE_UV		(2 * GRU_CACHE_LINE_BYTES)
+#define XPC_ACTIVATE_MQ_SIZE_UV	(4 * XP_MAX_NPARTITIONS_UV * \
+				 XPC_ACTIVATE_MSG_SIZE_UV)
+#define XPC_ACTIVATE_IRQ_NAME	"xpc_activate"
 
-#define XPC_ACTIVATE_MQ_SIZE_UV	(4 * XP_MAX_NPARTITIONS_UV * \
-				 XPC_ACTIVATE_MSG_SIZE_UV)
-#define XPC_NOTIFY_MQ_SIZE_UV	(4 * XP_MAX_NPARTITIONS_UV * \
-				 XPC_NOTIFY_MSG_SIZE_UV)
+#define XPC_NOTIFY_MSG_SIZE_UV	(2 * GRU_CACHE_LINE_BYTES)
+#define XPC_NOTIFY_MQ_SIZE_UV	(4 * XP_MAX_NPARTITIONS_UV * \
+				 XPC_NOTIFY_MSG_SIZE_UV)
+#define XPC_NOTIFY_IRQ_NAME	"xpc_notify"
 
-static void *xpc_activate_mq_uv;
-static void *xpc_notify_mq_uv;
+static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
+static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
 
 static int
 xpc_setup_partitions_sn_uv(void)
@@ -52,62 +62,209 @@ xpc_setup_partitions_sn_uv(void)
 	return 0;
 }
 
-static void *
-xpc_create_gru_mq_uv(unsigned int mq_size, int cpuid, unsigned int irq,
+static int
+xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
+{
+#if defined CONFIG_X86_64
+	mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset);
+	if (mq->irq < 0) {
+		dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
+			mq->irq);
+	}
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+	int mmr_pnode;
+	unsigned long mmr_value;
+
+	if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
+		mq->irq = SGI_XPC_ACTIVATE;
+	else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
+		mq->irq = SGI_XPC_NOTIFY;
+	else
+		return -EINVAL;
+
+	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+	mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
+
+	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
+#else
+	#error not a supported configuration
+#endif
+
+	return 0;
+}
+
+static void
+xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
+{
+#if defined CONFIG_X86_64
+	uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset);
+
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+	int mmr_pnode;
+	unsigned long mmr_value;
+
+	mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
+	mmr_value = 1UL << 16;
+
+	uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
+#else
+	#error not a supported configuration
+#endif
+}
+
+static int
+xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
+{
+	int ret;
+
+#if defined CONFIG_X86_64
+	ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, mq->address, mq->order,
+					 &mq->mmr_offset);
+	if (ret < 0) {
+		dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
+			"ret=%d\n", ret);
+		return ret;
+	}
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+	ret = sn_mq_watchlist_alloc(mq->mmr_blade, mq->address, mq->order,
+				    &mq->mmr_offset);
+	if (ret < 0) {
+		dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
+			ret);
+		return -EBUSY;
+	}
+#else
+	#error not a supported configuration
+#endif
+
+	mq->watchlist_num = ret;
+	return 0;
+}
+
+static void
+xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
+{
+	int ret;
+
+#if defined CONFIG_X86_64
+	ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+	BUG_ON(ret != BIOS_STATUS_SUCCESS);
+#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
+	ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
+	BUG_ON(ret != SALRET_OK);
+#else
+	#error not a supported configuration
+#endif
+}
+
+static struct xpc_gru_mq_uv *
+xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
 		     irq_handler_t irq_handler)
 {
+	enum xp_retval xp_ret;
 	int ret;
 	int nid;
-	int mq_order;
+	int pg_order;
 	struct page *page;
-	void *mq;
+	struct xpc_gru_mq_uv *mq;
+
+	mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
+	if (mq == NULL) {
+		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
+			"a xpc_gru_mq_uv structure\n");
+		ret = -ENOMEM;
+		goto out_1;
+	}
 
-	nid = cpu_to_node(cpuid);
-	mq_order = get_order(mq_size);
+	pg_order = get_order(mq_size);
+	mq->order = pg_order + PAGE_SHIFT;
+	mq_size = 1UL << mq->order;
+
+	mq->mmr_blade = uv_cpu_to_blade_id(cpu);
+
+	nid = cpu_to_node(cpu);
 	page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
-				mq_order);
+				pg_order);
 	if (page == NULL) {
 		dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
 			"bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
-		return NULL;
+		ret = -ENOMEM;
+		goto out_2;
 	}
+	mq->address = page_address(page);
 
-	mq = page_address(page);
-	ret = gru_create_message_queue(mq, mq_size);
+	ret = gru_create_message_queue(mq->address, mq_size);
 	if (ret != 0) {
 		dev_err(xpc_part, "gru_create_message_queue() returned "
 			"error=%d\n", ret);
-		free_pages((unsigned long)mq, mq_order);
-		return NULL;
+		ret = -EINVAL;
+		goto out_3;
 	}
 
-	/* !!! Need to do some other things to set up IRQ */
+	/* enable generation of irq when GRU mq operation occurs to this mq */
+	ret = xpc_gru_mq_watchlist_alloc_uv(mq);
+	if (ret != 0)
+		goto out_3;
 
-	ret = request_irq(irq, irq_handler, 0, "xpc", NULL);
+	ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
+	if (ret != 0)
+		goto out_4;
+
+	ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
 	if (ret != 0) {
 		dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
-			irq, ret);
-		free_pages((unsigned long)mq, mq_order);
-		return NULL;
+			mq->irq, ret);
+		goto out_5;
 	}
 
-	/* !!! enable generation of irq when GRU mq op occurs to this mq */
-
-	/* ??? allow other partitions to access GRU mq? */
+	/* allow other partitions to access this GRU mq */
+	xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
+	if (xp_ret != xpSuccess) {
+		ret = -EACCES;
+		goto out_6;
+	}
 
 	return mq;
+
+	/* something went wrong */
+out_6:
+	free_irq(mq->irq, NULL);
+out_5:
+	xpc_release_gru_mq_irq_uv(mq);
+out_4:
+	xpc_gru_mq_watchlist_free_uv(mq);
+out_3:
+	free_pages((unsigned long)mq->address, pg_order);
+out_2:
+	kfree(mq);
+out_1:
+	return ERR_PTR(ret);
 }
 
 static void
-xpc_destroy_gru_mq_uv(void *mq, unsigned int mq_size, unsigned int irq)
+xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
 {
-	/* ??? disallow other partitions to access GRU mq? */
+	unsigned int mq_size;
+	int pg_order;
+	int ret;
+
+	/* disallow other partitions to access GRU mq */
+	mq_size = 1UL << mq->order;
+	ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
+	BUG_ON(ret != xpSuccess);
+
+	/* unregister irq handler and release mq irq/vector mapping */
+	free_irq(mq->irq, NULL);
+	xpc_release_gru_mq_irq_uv(mq);
 
-	/* !!! disable generation of irq when GRU mq op occurs to this mq */
+	/* disable generation of irq when GRU mq op occurs to this mq */
+	xpc_gru_mq_watchlist_free_uv(mq);
 
-	free_irq(irq, NULL);
+	pg_order = mq->order - PAGE_SHIFT;
+	free_pages((unsigned long)mq->address, pg_order);
 
-	free_pages((unsigned long)mq, get_order(mq_size));
+	kfree(mq);
 }
 
 static enum xp_retval
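
Worth noting in xpc_create_gru_mq_uv() above: each failure point jumps to a numbered out_N label that unwinds exactly the steps completed so far, and failure is reported via ERR_PTR() rather than NULL so callers see the real errno. A stripped-down sketch of the same shape, using hypothetical helpers (example_create_thing and struct thing are not in the patch):

#include <linux/err.h>
#include <linux/slab.h>

struct thing { void *buf; };

/* hypothetical: same create/unwind shape as xpc_create_gru_mq_uv() */
static struct thing *example_create_thing(size_t bufsize)
{
	struct thing *t;
	int ret;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (t == NULL) {
		ret = -ENOMEM;
		goto out_1;
	}

	t->buf = kzalloc(bufsize, GFP_KERNEL);	/* step 2 */
	if (t->buf == NULL) {
		ret = -ENOMEM;
		goto out_2;		/* undo step 1 only */
	}

	return t;

out_2:
	kfree(t);
out_1:
	return ERR_PTR(ret);	/* caller tests with IS_ERR()/PTR_ERR() */
}
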
@@ -402,7 +559,10 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
 	struct xpc_partition *part;
 	int wakeup_hb_checker = 0;
 
-	while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) {
+	while (1) {
+		msg_hdr = gru_get_next_message(xpc_activate_mq_uv->address);
+		if (msg_hdr == NULL)
+			break;
 
 		partid = msg_hdr->partid;
 		if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
@@ -418,7 +578,7 @@ xpc_handle_activate_IRQ_uv(int irq, void *dev_id)
 			}
 		}
 
-		gru_free_message(xpc_activate_mq_uv, msg_hdr);
+		gru_free_message(xpc_activate_mq_uv->address, msg_hdr);
 	}
 
 	if (wakeup_hb_checker)
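
The receive loop above is the usual drain-until-empty pattern; it is rewritten as while (1) with an explicit break presumably because the added ->address dereference no longer fits the old assignment-in-condition on one line. The shape, as a sketch (the message-header type name is assumed from the surrounding context; validation and dispatch are elided):

/*
 * Sketch of the drain loop in xpc_handle_activate_IRQ_uv(): pull
 * messages until the GRU ring is empty, freeing each slot as we go.
 */
static void example_drain(struct xpc_gru_mq_uv *mq)
{
	struct xpc_activate_mq_msghdr_uv *msg_hdr;

	while (1) {
		msg_hdr = gru_get_next_message(mq->address);
		if (msg_hdr == NULL)
			break;		/* ring is empty */

		/* ... check msg_hdr->partid, dispatch on msg type ... */

		gru_free_message(mq->address, msg_hdr);
	}
}
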
@@ -507,7 +667,7 @@ xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
 static int
 xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp)
 {
-	rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv);
+	rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv->address);
 	return 0;
 }
 
@@ -1411,22 +1571,18 @@ xpc_init_uv(void)
 		return -E2BIG;
 	}
 
-	/* ??? The cpuid argument's value is 0, is that what we want? */
-	/* !!! The irq argument's value isn't correct. */
-	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 0,
+	xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
+						  XPC_ACTIVATE_IRQ_NAME,
 						  xpc_handle_activate_IRQ_uv);
-	if (xpc_activate_mq_uv == NULL)
-		return -ENOMEM;
+	if (IS_ERR(xpc_activate_mq_uv))
+		return PTR_ERR(xpc_activate_mq_uv);
 
-	/* ??? The cpuid argument's value is 0, is that what we want? */
-	/* !!! The irq argument's value isn't correct. */
-	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, 0,
+	xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
+						XPC_NOTIFY_IRQ_NAME,
 						xpc_handle_notify_IRQ_uv);
-	if (xpc_notify_mq_uv == NULL) {
-		/* !!! The irq argument's value isn't correct. */
-		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv,
-				      XPC_ACTIVATE_MQ_SIZE_UV, 0);
-		return -ENOMEM;
+	if (IS_ERR(xpc_notify_mq_uv)) {
+		xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+		return PTR_ERR(xpc_notify_mq_uv);
 	}
 
 	return 0;
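
With ERR_PTR() in play, xpc_init_uv() above now propagates the creator's exact errno instead of hard-coding -ENOMEM, and tears down the earlier queue when the later one fails. The generic caller-side shape, reusing the hypothetical helpers from the earlier sketch (example_destroy_thing is likewise hypothetical):

#include <linux/err.h>
#include <linux/types.h>

struct thing;
struct thing *example_create_thing(size_t bufsize);	/* sketch above */
void example_destroy_thing(struct thing *t);		/* hypothetical */

static int example_setup_both(void)
{
	struct thing *a, *b;

	a = example_create_thing(1024);
	if (IS_ERR(a))
		return PTR_ERR(a);	/* creator's errno, not a guess */

	b = example_create_thing(2048);
	if (IS_ERR(b)) {
		example_destroy_thing(a);	/* unwind the earlier step */
		return PTR_ERR(b);
	}
	return 0;
}
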
@@ -1435,9 +1591,6 @@ xpc_init_uv(void)
 void
 xpc_exit_uv(void)
 {
-	/* !!! The irq argument's value isn't correct. */
-	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv, XPC_NOTIFY_MQ_SIZE_UV, 0);
-
-	/* !!! The irq argument's value isn't correct. */
-	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, XPC_ACTIVATE_MQ_SIZE_UV, 0);
+	xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
+	xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
 }