path: root/drivers/misc/sgi-gru/grukservices.c
Diffstat (limited to 'drivers/misc/sgi-gru/grukservices.c')
-rw-r--r--	drivers/misc/sgi-gru/grukservices.c	131
1 file changed, 81 insertions(+), 50 deletions(-)
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 880c55dfb662..d8bd7d84a7cf 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -52,8 +52,10 @@
  */
 
 /* Blade percpu resources PERMANENTLY reserved for kernel use */
 #define GRU_NUM_KERNEL_CBR	1
 #define GRU_NUM_KERNEL_DSR_BYTES 256
+#define GRU_NUM_KERNEL_DSR_CL	(GRU_NUM_KERNEL_DSR_BYTES /		\
+					GRU_CACHE_LINE_BYTES)
 #define KERNEL_CTXNUM		15
 
 /* GRU instruction attributes for all instructions */
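
The new GRU_NUM_KERNEL_DSR_CL macro restates the kernel's reserved DSR space in cache lines. A quick sketch of the arithmetic, assuming the driver's usual 64-byte GRU cache line (GRU_CACHE_LINE_BYTES is defined elsewhere in the driver, not in this patch):

	/*
	 * GRU_NUM_KERNEL_DSR_CL = GRU_NUM_KERNEL_DSR_BYTES / GRU_CACHE_LINE_BYTES
	 *                       = 256 / 64
	 *                       = 4 cache lines
	 *
	 * gru_bcopy() takes its intermediate DSR buffer size in cache lines,
	 * not bytes, which is what motivates the gru_copy_gpa() hunk near the
	 * end: passing GRU_NUM_KERNEL_DSR_BYTES (256) where 4 was meant would
	 * overstate the buffer 64-fold.
	 */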
@@ -94,7 +96,6 @@ struct message_header {
 	char fill;
 };
 
-#define QLINES(mq) ((mq) + offsetof(struct message_queue, qlines))
 #define HSTATUS(mq, h) ((mq) + offsetof(struct message_queue, hstatus[h]))
 
 static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
@@ -122,7 +123,7 @@ int gru_get_cb_exception_detail(void *cb,
 	struct gru_control_block_extended *cbe;
 
 	cbe = get_cbe(GRUBASE(cb), get_cb_number(cb));
-	prefetchw(cbe);	/* Harmless on hardware, required for emulator */
+	prefetchw(cbe);		/* Harmless on hardware, required for emulator */
 	excdet->opc = cbe->opccpy;
 	excdet->exopc = cbe->exopccpy;
 	excdet->ecause = cbe->ecause;
@@ -250,7 +251,8 @@ static inline void restore_present2(void *p, int val)
  * Create a message queue.
  *	qlines - message queue size in cache lines. Includes 2-line header.
  */
-int gru_create_message_queue(void *p, unsigned int bytes)
+int gru_create_message_queue(struct gru_message_queue_desc *mqd,
+		void *p, unsigned int bytes, int nasid, int vector, int apicid)
 {
 	struct message_queue *mq = p;
 	unsigned int qlines;
@@ -265,6 +267,12 @@ int gru_create_message_queue(void *p, unsigned int bytes)
 	mq->hstatus[0] = 0;
 	mq->hstatus[1] = 1;
 	mq->head = gru_mesq_head(2, qlines / 2 + 1);
+	mqd->mq = mq;
+	mqd->mq_gpa = uv_gpa(mq);
+	mqd->qlines = qlines;
+	mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid);
+	mqd->interrupt_vector = vector;
+	mqd->interrupt_apicid = apicid;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(gru_create_message_queue);
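
Taken together, the assignments above imply a descriptor shaped roughly like the sketch below. The authoritative definition lives in the driver's headers, so treat the field order, types, and comments here as a reconstruction from this hunk, not gospel:

	struct gru_message_queue_desc {
		void		*mq;		  /* kernel vaddr of the queue */
		unsigned long	mq_gpa;		  /* GRU global address of mq */
		int		qlines;		  /* queue size in cache lines */
		int		interrupt_vector; /* 0 = no explicit IPIs */
		int		interrupt_pnode;  /* UV_NASID_TO_PNODE(nasid) */
		int		interrupt_apicid; /* cpu targeted by the IPI */
	};

A hedged example of the new creation call; the buffer allocation and the nasid/vector/apicid values are placeholders for whatever the receiving side advertises:

	struct gru_message_queue_desc mqd;
	void *buf = (void *)__get_free_pages(GFP_KERNEL, get_order(bytes));

	if (!buf || gru_create_message_queue(&mqd, buf, bytes,
					     nasid, vector, apicid))
		return -ENOMEM;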
@@ -277,8 +285,8 @@ EXPORT_SYMBOL_GPL(gru_create_message_queue);
  *		-1 - if mesq sent successfully but queue not full
  *		>0 - unexpected error. MQE_xxx returned
  */
-static int send_noop_message(void *cb,
-				unsigned long mq, void *mesg)
+static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
+				void *mesg)
 {
 	const struct message_header noop_header = {
 					.present = MQS_NOOP, .lines = 1};
@@ -289,7 +297,7 @@ static int send_noop_message(void *cb,
 	STAT(mesq_noop);
 	save_mhdr = *mhdr;
 	*mhdr = noop_header;
-	gru_mesq(cb, mq, gru_get_tri(mhdr), 1, IMA);
+	gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), 1, IMA);
 	ret = gru_wait(cb);
 
 	if (ret) {
@@ -313,7 +321,7 @@ static int send_noop_message(void *cb,
 		break;
 	case CBSS_PUT_NACKED:
 		STAT(mesq_noop_put_nacked);
-		m = mq + (gru_get_amo_value_head(cb) << 6);
+		m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
 		gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
 			   IMA);
 		if (gru_wait(cb) == CBS_IDLE)
@@ -333,30 +341,20 @@ static int send_noop_message(void *cb,
 /*
  * Handle a gru_mesq full.
  */
-static int send_message_queue_full(void *cb,
-				unsigned long mq, void *mesg, int lines)
+static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd,
+				void *mesg, int lines)
 {
 	union gru_mesqhead mqh;
 	unsigned int limit, head;
 	unsigned long avalue;
-	int half, qlines, save;
+	int half, qlines;
 
 	/* Determine if switching to first/second half of q */
 	avalue = gru_get_amo_value(cb);
 	head = gru_get_amo_value_head(cb);
 	limit = gru_get_amo_value_limit(cb);
 
-	/*
-	 * Fetch "qlines" from the queue header. Since the queue may be
-	 * in memory that can't be accessed using socket addresses, use
-	 * the GRU to access the data. Use DSR space from the message.
-	 */
-	save = *(int *)mesg;
-	gru_vload(cb, QLINES(mq), gru_get_tri(mesg), XTYPE_W, 1, 1, IMA);
-	if (gru_wait(cb) != CBS_IDLE)
-		goto cberr;
-	qlines = *(int *)mesg;
-	*(int *)mesg = save;
+	qlines = mqd->qlines;
 	half = (limit != qlines);
 
 	if (half)
@@ -365,7 +363,7 @@ static int send_message_queue_full(void *cb,
 		mqh = gru_mesq_head(2, qlines / 2 + 1);
 
 	/* Try to get lock for switching head pointer */
-	gru_gamir(cb, EOP_IR_CLR, HSTATUS(mq, half), XTYPE_DW, IMA);
+	gru_gamir(cb, EOP_IR_CLR, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, IMA);
 	if (gru_wait(cb) != CBS_IDLE)
 		goto cberr;
 	if (!gru_get_amo_value(cb)) {
@@ -375,8 +373,8 @@ static int send_message_queue_full(void *cb,
 
 	/* Got the lock. Send optional NOP if queue not full, */
 	if (head != limit) {
-		if (send_noop_message(cb, mq, mesg)) {
-			gru_gamir(cb, EOP_IR_INC, HSTATUS(mq, half),
+		if (send_noop_message(cb, mqd, mesg)) {
+			gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half),
 				XTYPE_DW, IMA);
 			if (gru_wait(cb) != CBS_IDLE)
 				goto cberr;
@@ -387,14 +385,16 @@ static int send_message_queue_full(void *cb,
 	}
 
 	/* Then flip queuehead to other half of queue. */
-	gru_gamer(cb, EOP_ERR_CSWAP, mq, XTYPE_DW, mqh.val, avalue, IMA);
+	gru_gamer(cb, EOP_ERR_CSWAP, mqd->mq_gpa, XTYPE_DW, mqh.val, avalue,
+			IMA);
 	if (gru_wait(cb) != CBS_IDLE)
 		goto cberr;
 
 	/* If not successfully in swapping queue head, clear the hstatus lock */
 	if (gru_get_amo_value(cb) != avalue) {
 		STAT(mesq_qf_switch_head_failed);
-		gru_gamir(cb, EOP_IR_INC, HSTATUS(mq, half), XTYPE_DW, IMA);
+		gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), XTYPE_DW,
+			IMA);
 		if (gru_wait(cb) != CBS_IDLE)
 			goto cberr;
 	}
@@ -404,15 +404,25 @@ cberr:
 	return MQE_UNEXPECTED_CB_ERR;
 }
 
+/*
+ * Send a cross-partition interrupt to the SSI that contains the target
+ * message queue. Normally, the interrupt is automatically delivered by hardware
+ * but some error conditions require explicit delivery.
+ */
+static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
+{
+	if (mqd->interrupt_vector)
+		uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid,
+				mqd->interrupt_vector);
+}
+
 
 /*
  * Handle a gru_mesq failure. Some of these failures are software recoverable
  * or retryable.
  */
-static int send_message_failure(void *cb,
-			unsigned long mq,
-			void *mesg,
-			int lines)
+static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
+			void *mesg, int lines)
 {
 	int substatus, ret = 0;
 	unsigned long m;
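
Note the guard in the new helper: a descriptor whose interrupt_vector is 0 (a polling-mode queue) turns explicit delivery into a no-op, so the recovery paths below can call it unconditionally. A minimal sketch of that contract, with the descriptor contents assumed rather than taken from a real queue:

	struct gru_message_queue_desc polled = {
		.interrupt_vector = 0,	/* receiver polls; no IPI wanted */
	};

	send_message_queue_interrupt(&polled);	/* returns without an IPI */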
@@ -429,7 +439,7 @@ static int send_message_failure(void *cb,
 		break;
 	case CBSS_QLIMIT_REACHED:
 		STAT(mesq_send_qlimit_reached);
-		ret = send_message_queue_full(cb, mq, mesg, lines);
+		ret = send_message_queue_full(cb, mqd, mesg, lines);
 		break;
 	case CBSS_AMO_NACKED:
 		STAT(mesq_send_amo_nacked);
@@ -437,12 +447,14 @@ static int send_message_failure(void *cb,
 		break;
 	case CBSS_PUT_NACKED:
 		STAT(mesq_send_put_nacked);
-		m =mq + (gru_get_amo_value_head(cb) << 6);
+		m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
 		gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
-		if (gru_wait(cb) == CBS_IDLE)
+		if (gru_wait(cb) == CBS_IDLE) {
 			ret = MQE_OK;
-		else
+			send_message_queue_interrupt(mqd);
+		} else {
 			ret = MQE_UNEXPECTED_CB_ERR;
+		}
 		break;
 	default:
 		BUG();
@@ -452,12 +464,12 @@ static int send_message_failure(void *cb,
 
 /*
  * Send a message to a message queue
- *	cb	GRU control block to use to send message
- *	mq	message queue
+ *	mqd	message queue descriptor
 *	mesg	message. ust be vaddr within a GSEG
 *	bytes	message size (<= 2 CL)
  */
-int gru_send_message_gpa(unsigned long mq, void *mesg, unsigned int bytes)
+int gru_send_message_gpa(struct gru_message_queue_desc *mqd, void *mesg,
+				unsigned int bytes)
 {
 	struct message_header *mhdr;
 	void *cb;
@@ -481,10 +493,10 @@ int gru_send_message_gpa(unsigned long mq, void *mesg, unsigned int bytes)
 
 	do {
 		ret = MQE_OK;
-		gru_mesq(cb, mq, gru_get_tri(mhdr), clines, IMA);
+		gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), clines, IMA);
 		istatus = gru_wait(cb);
 		if (istatus != CBS_IDLE)
-			ret = send_message_failure(cb, mq, dsr, clines);
+			ret = send_message_failure(cb, mqd, dsr, clines);
 	} while (ret == MQIE_AGAIN);
 	gru_free_cpu_resources(cb, dsr);
 
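
A hedged usage sketch of the reworked sender. Per the comment block above, the message must fit in two cache lines; MQE_QUEUE_FULL is assumed to be one of the MQE_* codes this file already returns:

	char msg[2 * GRU_CACHE_LINE_BYTES];
	int ret;

	/* ...fill in msg... */
	ret = gru_send_message_gpa(&mqd, msg, sizeof(msg));
	if (ret == MQE_QUEUE_FULL)
		;	/* receiver is behind; caller may retry later */
	else if (ret != MQE_OK)
		;	/* unexpected MQE_xxx failure */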
@@ -497,9 +509,9 @@ EXPORT_SYMBOL_GPL(gru_send_message_gpa);
 /*
  * Advance the receive pointer for the queue to the next message.
  */
-void gru_free_message(void *rmq, void *mesg)
+void gru_free_message(struct gru_message_queue_desc *mqd, void *mesg)
 {
-	struct message_queue *mq = rmq;
+	struct message_queue *mq = mqd->mq;
 	struct message_header *mhdr = mq->next;
 	void *next, *pnext;
 	int half = -1;
@@ -529,16 +541,16 @@ EXPORT_SYMBOL_GPL(gru_free_message);
  * present. User must call next_message() to move to next message.
  *	rmq	message queue
  */
-void *gru_get_next_message(void *rmq)
+void *gru_get_next_message(struct gru_message_queue_desc *mqd)
 {
-	struct message_queue *mq = rmq;
+	struct message_queue *mq = mqd->mq;
 	struct message_header *mhdr = mq->next;
 	int present = mhdr->present;
 
 	/* skip NOOP messages */
 	STAT(mesq_receive);
 	while (present == MQS_NOOP) {
-		gru_free_message(rmq, mhdr);
+		gru_free_message(mqd, mhdr);
 		mhdr = mq->next;
 		present = mhdr->present;
 	}
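
On the receive side the descriptor threads through the same way; a minimal consumer loop, assuming gru_get_next_message() returns NULL once no complete message is present:

	void *mesg;

	while ((mesg = gru_get_next_message(&mqd)) != NULL) {
		/* ...consume the message in place... */
		gru_free_message(&mqd, mesg);	/* advance mq->next */
	}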
@@ -576,7 +588,7 @@ int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
 	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
 		return MQE_BUG_NO_RESOURCES;
 	gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
-		  XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_BYTES, IMA);
+		  XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_CL, IMA);
 	ret = gru_wait(cb);
 	gru_free_cpu_resources(cb, dsr);
 	return ret;
@@ -611,7 +623,7 @@ static int quicktest(struct gru_state *gru)
 
 	if (word0 != word1 || word0 != MAGIC) {
 		printk
-		    ("GRU quicktest err: gru %d, found 0x%lx, expected 0x%lx\n",
+		    ("GRU quicktest err: gid %d, found 0x%lx, expected 0x%lx\n",
 		     gru->gs_gid, word1, MAGIC);
 		BUG();		/* ZZZ should not be fatal */
 	}
@@ -660,15 +672,15 @@ int gru_kservices_init(struct gru_state *gru)
 	cch->tlb_int_enable = 0;
 	cch->tfm_done_bit_enable = 0;
 	cch->unmap_enable = 1;
-	err = cch_allocate(cch, 0, cbr_map, dsr_map);
+	err = cch_allocate(cch, 0, 0, cbr_map, dsr_map);
 	if (err) {
 		gru_dbg(grudev,
-			"Unable to allocate kernel CCH: gru %d, err %d\n",
+			"Unable to allocate kernel CCH: gid %d, err %d\n",
 			gru->gs_gid, err);
 		BUG();
 	}
 	if (cch_start(cch)) {
-		gru_dbg(grudev, "Unable to start kernel CCH: gru %d, err %d\n",
+		gru_dbg(grudev, "Unable to start kernel CCH: gid %d, err %d\n",
 			gru->gs_gid, err);
 		BUG();
 	}
@@ -678,3 +690,22 @@ int gru_kservices_init(struct gru_state *gru)
 	quicktest(gru);
 	return 0;
 }
+
+void gru_kservices_exit(struct gru_state *gru)
+{
+	struct gru_context_configuration_handle *cch;
+	struct gru_blade_state *bs;
+
+	bs = gru->gs_blade;
+	if (gru != &bs->bs_grus[1])
+		return;
+
+	cch = get_cch(gru->gs_gru_base_vaddr, KERNEL_CTXNUM);
+	lock_cch_handle(cch);
+	if (cch_interrupt_sync(cch))
+		BUG();
+	if (cch_deallocate(cch))
+		BUG();
+	unlock_cch_handle(cch);
+}
+
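
The new exit path only acts on bs_grus[1], the chiplet that hosts the blade's kernel context (KERNEL_CTXNUM), so it is safe to call for every GRU on a blade. A hypothetical shutdown loop; the GRU_CHIPLETS_PER_BLADE bound is an assumption from elsewhere in the driver, not part of this patch:

	int i;

	for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++)
		gru_kservices_exit(&bs->bs_grus[i]);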