Diffstat (limited to 'drivers/misc/sgi-gru/grukservices.c')
-rw-r--r--  drivers/misc/sgi-gru/grukservices.c | 211
1 file changed, 157 insertions(+), 54 deletions(-)
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 766e21e15574..34749ee88dfa 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -31,6 +31,7 @@
 #include <linux/interrupt.h>
 #include <linux/uaccess.h>
 #include <linux/delay.h>
+#include <asm/io_apic.h>
 #include "gru.h"
 #include "grulib.h"
 #include "grutables.h"
@@ -97,9 +98,6 @@
 #define ASYNC_HAN_TO_BID(h)	((h) - 1)
 #define ASYNC_BID_TO_HAN(b)	((b) + 1)
 #define ASYNC_HAN_TO_BS(h)	gru_base[ASYNC_HAN_TO_BID(h)]
-#define KCB_TO_GID(cb)		((cb - gru_start_vaddr) /	\
-					(GRU_SIZE * GRU_CHIPLETS_PER_BLADE))
-#define KCB_TO_BS(cb)		gru_base[KCB_TO_GID(cb)]
 
 #define GRU_NUM_KERNEL_CBR	1
 #define GRU_NUM_KERNEL_DSR_BYTES 256
@@ -160,8 +158,10 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
 	up_read(&bs->bs_kgts_sema);
 	down_write(&bs->bs_kgts_sema);
 
-	if (!bs->bs_kgts)
-		bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0);
+	if (!bs->bs_kgts) {
+		bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0, 0);
+		bs->bs_kgts->ts_user_blade_id = blade_id;
+	}
 	kgts = bs->bs_kgts;
 
 	if (!kgts->ts_gru) {
@@ -172,9 +172,9 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
 		kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
 			GRU_NUM_KERNEL_DSR_BYTES * ncpus +
 			bs->bs_async_dsr_bytes);
-		while (!gru_assign_gru_context(kgts, blade_id)) {
+		while (!gru_assign_gru_context(kgts)) {
 			msleep(1);
-			gru_steal_context(kgts, blade_id);
+			gru_steal_context(kgts);
 		}
 		gru_load_context(kgts);
 		gru = bs->bs_kgts->ts_gru;
@@ -200,13 +200,15 @@ static int gru_free_kernel_contexts(void)
 		bs = gru_base[bid];
 		if (!bs)
 			continue;
+
+		/* Ignore busy contexts. Don't want to block here. */
 		if (down_write_trylock(&bs->bs_kgts_sema)) {
 			kgts = bs->bs_kgts;
 			if (kgts && kgts->ts_gru)
 				gru_unload_context(kgts, 0);
-			kfree(kgts);
 			bs->bs_kgts = NULL;
 			up_write(&bs->bs_kgts_sema);
+			kfree(kgts);
 		} else {
 			ret++;
 		}
@@ -220,13 +222,21 @@ static int gru_free_kernel_contexts(void)
 static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
 {
 	struct gru_blade_state *bs;
+	int bid;
 
 	STAT(lock_kernel_context);
-	bs = gru_base[blade_id];
+again:
+	bid = blade_id < 0 ? uv_numa_blade_id() : blade_id;
+	bs = gru_base[bid];
 
+	/* Handle the case where migration occured while waiting for the sema */
 	down_read(&bs->bs_kgts_sema);
+	if (blade_id < 0 && bid != uv_numa_blade_id()) {
+		up_read(&bs->bs_kgts_sema);
+		goto again;
+	}
 	if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
-		gru_load_kernel_context(bs, blade_id);
+		gru_load_kernel_context(bs, bid);
 	return bs;
 
 }
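
gru_lock_kernel_context() now accepts a negative blade_id meaning "whatever
blade this CPU is on". Since down_read() can sleep, the task may migrate to a
different blade before the semaphore is acquired, so the blade id is
recomputed after the lock and the sequence retried on a mismatch. A minimal
sketch of this check-after-acquire pattern, with hypothetical names (not
driver code):

	struct state { struct rw_semaphore sema; };
	extern struct state *state_table[];
	extern int current_blade(void);		/* e.g. uv_numa_blade_id() */

	static struct state *lock_local_state(void)
	{
		struct state *s;
		int bid;
	again:
		bid = current_blade();		/* may go stale if we sleep */
		s = state_table[bid];
		down_read(&s->sema);		/* can block; task may migrate */
		if (bid != current_blade()) {	/* migrated? drop and retry */
			up_read(&s->sema);
			goto again;
		}
		return s;
	}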
@@ -255,7 +265,7 @@ static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
 
 	BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
 	preempt_disable();
-	bs = gru_lock_kernel_context(uv_numa_blade_id());
+	bs = gru_lock_kernel_context(-1);
 	lcpu = uv_blade_processor_id();
 	*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
 	*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
@@ -384,13 +394,31 @@ int gru_get_cb_exception_detail(void *cb,
 		struct control_block_extended_exc_detail *excdet)
 {
 	struct gru_control_block_extended *cbe;
-	struct gru_blade_state *bs;
-	int cbrnum;
-
-	bs = KCB_TO_BS(cb);
-	cbrnum = thread_cbr_number(bs->bs_kgts, get_cb_number(cb));
+	struct gru_thread_state *kgts = NULL;
+	unsigned long off;
+	int cbrnum, bid;
+
+	/*
+	 * Locate kgts for cb. This algorithm is SLOW but
+	 * this function is rarely called (ie., almost never).
+	 * Performance does not matter.
+	 */
+	for_each_possible_blade(bid) {
+		if (!gru_base[bid])
+			break;
+		kgts = gru_base[bid]->bs_kgts;
+		if (!kgts || !kgts->ts_gru)
+			continue;
+		off = cb - kgts->ts_gru->gs_gru_base_vaddr;
+		if (off < GRU_SIZE)
+			break;
+		kgts = NULL;
+	}
+	BUG_ON(!kgts);
+	cbrnum = thread_cbr_number(kgts, get_cb_number(cb));
 	cbe = get_cbe(GRUBASE(cb), cbrnum);
 	gru_flush_cache(cbe);	/* CBE not coherent */
+	sync_core();
 	excdet->opc = cbe->opccpy;
 	excdet->exopc = cbe->exopccpy;
 	excdet->ecause = cbe->ecause;
@@ -409,8 +437,8 @@ char *gru_get_cb_exception_detail_str(int ret, void *cb,
 	if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
 		gru_get_cb_exception_detail(cb, &excdet);
 		snprintf(buf, size,
-			"GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
-			"excdet0 0x%lx, excdet1 0x%x",
+			"GRU:%d exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
+			"excdet0 0x%lx, excdet1 0x%x", smp_processor_id(),
 			gen, excdet.opc, excdet.exopc, excdet.ecause,
 			excdet.exceptdet0, excdet.exceptdet1);
 	} else {
@@ -457,9 +485,10 @@ int gru_check_status_proc(void *cb)
 	int ret;
 
 	ret = gen->istatus;
-	if (ret != CBS_EXCEPTION)
-		return ret;
-	return gru_retry_exception(cb);
+	if (ret == CBS_EXCEPTION)
+		ret = gru_retry_exception(cb);
+	rmb();
+	return ret;
 
 }
 
@@ -471,7 +500,7 @@ int gru_wait_proc(void *cb)
 	ret = gru_wait_idle_or_exception(gen);
 	if (ret == CBS_EXCEPTION)
 		ret = gru_retry_exception(cb);
-
+	rmb();
 	return ret;
 }
 
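
Both wait paths above now execute rmb() before returning. The likely intent
(inferred, not stated in the patch): a caller that observes a completed status
will immediately read data the GRU deposited in memory or the DSR, and the
status load must not be reordered with those later data loads. From a caller's
point of view, with cb and dsr obtained from gru_get_cpu_resources():

	unsigned long value;

	if (gru_wait_proc(cb) == CBS_IDLE) {
		/*
		 * The rmb() inside gru_wait_proc() keeps this data load from
		 * being satisfied ahead of the status load that saw
		 * completion.
		 */
		value = *(unsigned long *)dsr;
	}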
@@ -538,7 +567,7 @@ int gru_create_message_queue(struct gru_message_queue_desc *mqd,
 	mqd->mq = mq;
 	mqd->mq_gpa = uv_gpa(mq);
 	mqd->qlines = qlines;
-	mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid);
+	mqd->interrupt_pnode = nasid >> 1;
 	mqd->interrupt_vector = vector;
 	mqd->interrupt_apicid = apicid;
 	return 0;
@@ -598,6 +627,8 @@ static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
 		ret = MQE_UNEXPECTED_CB_ERR;
 		break;
 	case CBSS_PAGE_OVERFLOW:
+		STAT(mesq_noop_page_overflow);
+		/* fallthru */
 	default:
 		BUG();
 	}
@@ -673,18 +704,6 @@ cberr:
 }
 
 /*
- * Send a cross-partition interrupt to the SSI that contains the target
- * message queue. Normally, the interrupt is automatically delivered by hardware
- * but some error conditions require explicit delivery.
- */
-static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
-{
-	if (mqd->interrupt_vector)
-		uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid,
-				mqd->interrupt_vector);
-}
-
-/*
  * Handle a PUT failure. Note: if message was a 2-line message, one of the
  * lines might have successfully have been written. Before sending the
  * message, "present" must be cleared in BOTH lines to prevent the receiver
@@ -693,7 +712,8 @@ static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
 static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
 			void *mesg, int lines)
 {
-	unsigned long m;
+	unsigned long m, *val = mesg, gpa, save;
+	int ret;
 
 	m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
 	if (lines == 2) {
@@ -704,7 +724,26 @@ static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
 	gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
 	if (gru_wait(cb) != CBS_IDLE)
 		return MQE_UNEXPECTED_CB_ERR;
-	send_message_queue_interrupt(mqd);
+
+	if (!mqd->interrupt_vector)
+		return MQE_OK;
+
+	/*
+	 * Send a cross-partition interrupt to the SSI that contains the target
+	 * message queue. Normally, the interrupt is automatically delivered by
+	 * hardware but some error conditions require explicit delivery.
+	 * Use the GRU to deliver the interrupt. Otherwise partition failures
+	 * could cause unrecovered errors.
+	 */
+	gpa = uv_global_gru_mmr_address(mqd->interrupt_pnode, UVH_IPI_INT);
+	save = *val;
+	*val = uv_hub_ipi_value(mqd->interrupt_apicid, mqd->interrupt_vector,
+				dest_Fixed);
+	gru_vstore_phys(cb, gpa, gru_get_tri(mesg), IAA_REGISTER, IMA);
+	ret = gru_wait(cb);
+	*val = save;
+	if (ret != CBS_IDLE)
+		return MQE_UNEXPECTED_CB_ERR;
 	return MQE_OK;
 }
 
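
This replaces the deleted send_message_queue_interrupt() helper, which raised
the cross-partition interrupt with uv_hub_send_ipi(). The interrupt is now
delivered by the GRU itself: an IPI command word is stored into the target
hub's UVH_IPI_INT MMR via gru_vstore_phys(), so a failed remote partition
cannot leave the sender with an unrecovered error. The message buffer doubles
as the store's source operand, which is why its first doubleword is saved in
'save' and restored afterwards. Conceptually the store amounts to the
following, where map_remote_mmr() is a hypothetical mapping helper (the GRU is
required in practice because the target hub may be in another partition):

	unsigned long cmd = uv_hub_ipi_value(mqd->interrupt_apicid,
					     mqd->interrupt_vector, dest_Fixed);
	unsigned long gpa = uv_global_gru_mmr_address(mqd->interrupt_pnode,
						      UVH_IPI_INT);

	writeq(cmd, map_remote_mmr(gpa));	/* fires the remote interrupt */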
@@ -739,6 +778,9 @@ static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
 		STAT(mesq_send_put_nacked);
 		ret = send_message_put_nacked(cb, mqd, mesg, lines);
 		break;
+	case CBSS_PAGE_OVERFLOW:
+		STAT(mesq_page_overflow);
+		/* fallthru */
 	default:
 		BUG();
 	}
@@ -831,7 +873,6 @@ void *gru_get_next_message(struct gru_message_queue_desc *mqd)
 	int present = mhdr->present;
 
 	/* skip NOOP messages */
-	STAT(mesq_receive);
 	while (present == MQS_NOOP) {
 		gru_free_message(mqd, mhdr);
 		mhdr = mq->next;
@@ -851,6 +892,7 @@ void *gru_get_next_message(struct gru_message_queue_desc *mqd)
 	if (mhdr->lines == 2)
 		restore_present2(mhdr, mhdr->present2);
 
+	STAT(mesq_receive);
 	return mhdr;
 }
 EXPORT_SYMBOL_GPL(gru_get_next_message);
@@ -858,6 +900,29 @@ EXPORT_SYMBOL_GPL(gru_get_next_message);
 /* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/
 
 /*
+ * Load a DW from a global GPA. The GPA can be a memory or MMR address.
+ */
+int gru_read_gpa(unsigned long *value, unsigned long gpa)
+{
+	void *cb;
+	void *dsr;
+	int ret, iaa;
+
+	STAT(read_gpa);
+	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
+		return MQE_BUG_NO_RESOURCES;
+	iaa = gpa >> 62;
+	gru_vload_phys(cb, gpa, gru_get_tri(dsr), iaa, IMA);
+	ret = gru_wait(cb);
+	if (ret == CBS_IDLE)
+		*value = *(unsigned long *)dsr;
+	gru_free_cpu_resources(cb, dsr);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(gru_read_gpa);
+
+
+/*
  * Copy a block of data using the GRU resources
  */
 int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
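
The gru_read_gpa() export added above lets kernel code fetch one doubleword
from any global physical address, memory or MMR; the top two GPA bits
(gpa >> 62) select the IAA addressing mode for the access. A caller-side
sketch, where remote_gpa stands in for a real global address:

	unsigned long value;
	int ret;

	ret = gru_read_gpa(&value, remote_gpa);
	if (ret != CBS_IDLE)
		printk(KERN_DEBUG "GRU read failed, status %d\n", ret);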
@@ -898,24 +963,24 @@ static int quicktest0(unsigned long arg)
 
 	gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
 	if (gru_wait(cb) != CBS_IDLE) {
-		printk(KERN_DEBUG "GRU quicktest0: CBR failure 1\n");
+		printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 1\n", smp_processor_id());
 		goto done;
 	}
 
 	if (*p != MAGIC) {
-		printk(KERN_DEBUG "GRU: quicktest0 bad magic 0x%lx\n", *p);
+		printk(KERN_DEBUG "GRU:%d quicktest0 bad magic 0x%lx\n", smp_processor_id(), *p);
 		goto done;
 	}
 	gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
 	if (gru_wait(cb) != CBS_IDLE) {
-		printk(KERN_DEBUG "GRU quicktest0: CBR failure 2\n");
+		printk(KERN_DEBUG "GRU:%d quicktest0: CBR failure 2\n", smp_processor_id());
 		goto done;
 	}
 
 	if (word0 != word1 || word1 != MAGIC) {
 		printk(KERN_DEBUG
-		       "GRU quicktest0 err: found 0x%lx, expected 0x%lx\n",
-		       word1, MAGIC);
+		       "GRU:%d quicktest0 err: found 0x%lx, expected 0x%lx\n",
+		       smp_processor_id(), word1, MAGIC);
 		goto done;
 	}
 	ret = 0;
@@ -952,8 +1017,11 @@ static int quicktest1(unsigned long arg)
 		if (ret)
 			break;
 	}
-	if (ret != MQE_QUEUE_FULL || i != 4)
+	if (ret != MQE_QUEUE_FULL || i != 4) {
+		printk(KERN_DEBUG "GRU:%d quicktest1: unexpect status %d, i %d\n",
+		       smp_processor_id(), ret, i);
 		goto done;
+	}
 
 	for (i = 0; i < 6; i++) {
 		m = gru_get_next_message(&mqd);
@@ -961,7 +1029,12 @@
 			break;
 		gru_free_message(&mqd, m);
 	}
-	ret = (i == 4) ? 0 : -EIO;
+	if (i != 4) {
+		printk(KERN_DEBUG "GRU:%d quicktest2: bad message, i %d, m %p, m8 %d\n",
+		       smp_processor_id(), i, m, m ? m[8] : -1);
+		goto done;
+	}
+	ret = 0;
 
 done:
 	kfree(p);
@@ -977,6 +1050,7 @@ static int quicktest2(unsigned long arg)
 	int ret = 0;
 	unsigned long *buf;
 	void *cb0, *cb;
+	struct gru_control_block_status *gen;
 	int i, k, istatus, bytes;
 
 	bytes = numcb * 4 * 8;
@@ -996,20 +1070,30 @@
 					XTYPE_DW, 4, 1, IMA_INTERRUPT);
 
 	ret = 0;
-	for (k = 0; k < numcb; k++) {
+	k = numcb;
+	do {
 		gru_wait_async_cbr(han);
 		for (i = 0; i < numcb; i++) {
 			cb = cb0 + i * GRU_HANDLE_STRIDE;
 			istatus = gru_check_status(cb);
-			if (istatus == CBS_ACTIVE)
-				continue;
-			if (istatus == CBS_EXCEPTION)
-				ret = -EFAULT;
-			else if (buf[i] || buf[i + 1] || buf[i + 2] ||
-					buf[i + 3])
-				ret = -EIO;
+			if (istatus != CBS_ACTIVE && istatus != CBS_CALL_OS)
+				break;
 		}
-	}
+		if (i == numcb)
+			continue;
+		if (istatus != CBS_IDLE) {
+			printk(KERN_DEBUG "GRU:%d quicktest2: cb %d, exception\n", smp_processor_id(), i);
+			ret = -EFAULT;
+		} else if (buf[4 * i] || buf[4 * i + 1] || buf[4 * i + 2] ||
+				buf[4 * i + 3]) {
+			printk(KERN_DEBUG "GRU:%d quicktest2:cb %d, buf 0x%lx, 0x%lx, 0x%lx, 0x%lx\n",
+			       smp_processor_id(), i, buf[4 * i], buf[4 * i + 1], buf[4 * i + 2], buf[4 * i + 3]);
+			ret = -EIO;
+		}
+		k--;
+		gen = cb;
+		gen->istatus = CBS_CALL_OS; /* don't handle this CBR again */
+	} while (k);
 	BUG_ON(cmp.done);
 
 	gru_unlock_async_resource(han);
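
The rewritten loop changes quicktest2 from polling every CB on each pass to
retiring exactly one completed CB per wakeup: gru_wait_async_cbr() blocks
until the asynchronous interrupt fires, the inner scan stops at the first CB
that is neither still CBS_ACTIVE nor already handled, and setting the handled
CB's istatus to CBS_CALL_OS keeps it from being counted twice while k counts
the outstanding CBs down to zero. The buffer checks also now index buf[4 * i]
rather than buf[i], matching the four doublewords each CB transfers. The
control flow, reduced to a pattern with hypothetical helpers:

	int remaining = total, id;

	do {
		wait_for_wakeup();		/* gru_wait_async_cbr() above */
		id = find_first_retired();	/* scan; -1 if none finished */
		if (id < 0)
			continue;		/* spurious wakeup */
		check_result(id);		/* status + data validation */
		mark_handled(id);		/* istatus = CBS_CALL_OS */
		remaining--;
	} while (remaining);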
@@ -1019,6 +1103,22 @@ done:
 	return ret;
 }
 
+#define BUFSIZE 200
+static int quicktest3(unsigned long arg)
+{
+	char buf1[BUFSIZE], buf2[BUFSIZE];
+	int ret = 0;
+
+	memset(buf2, 0, sizeof(buf2));
+	memset(buf1, get_cycles() & 255, sizeof(buf1));
+	gru_copy_gpa(uv_gpa(buf2), uv_gpa(buf1), BUFSIZE);
+	if (memcmp(buf1, buf2, BUFSIZE)) {
+		printk(KERN_DEBUG "GRU:%d quicktest3 error\n", smp_processor_id());
+		ret = -EIO;
+	}
+	return ret;
+}
+
 /*
  * Debugging only. User hook for various kernel tests
  * of driver & gru.
@@ -1037,6 +1137,9 @@ int gru_ktest(unsigned long arg)
 	case 2:
 		ret = quicktest2(arg);
 		break;
+	case 3:
+		ret = quicktest3(arg);
+		break;
 	case 99:
 		ret = gru_free_kernel_contexts();
 		break;