-rw-r--r--  drivers/misc/sgi-gru/grufault.c     | 89
-rw-r--r--  drivers/misc/sgi-gru/grufile.c      |  1
-rw-r--r--  drivers/misc/sgi-gru/gruhandles.c   | 11
-rw-r--r--  drivers/misc/sgi-gru/gruhandles.h   | 20
-rw-r--r--  drivers/misc/sgi-gru/grukservices.c |  2
-rw-r--r--  drivers/misc/sgi-gru/grulib.h       |  1
-rw-r--r--  drivers/misc/sgi-gru/grumain.c      |  8
-rw-r--r--  drivers/misc/sgi-gru/gruprocfs.c    |  4
-rw-r--r--  drivers/misc/sgi-gru/grutables.h    |  9
9 files changed, 129 insertions, 16 deletions

diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c
index 7d757e9c42f0..a1b3a1d66af5 100644
--- a/drivers/misc/sgi-gru/grufault.c
+++ b/drivers/misc/sgi-gru/grufault.c
@@ -290,6 +290,61 @@ upm:
 
 
 /*
+ * Flush a CBE from cache. The CBE is clean in the cache. Dirty the
+ * CBE cacheline so that the line will be written back to home agent.
+ * Otherwise the line may be silently dropped. This has no impact
+ * except on performance.
+ */
+static void gru_flush_cache_cbe(struct gru_control_block_extended *cbe)
+{
+	if (unlikely(cbe)) {
+		cbe->cbrexecstatus = 0;		/* make CL dirty */
+		gru_flush_cache(cbe);
+	}
+}
+
+/*
+ * Preload the TLB with entries that may be required. Currently, preloading
+ * is implemented only for BCOPY. Preload <tlb_preload_count> pages OR to
+ * the end of the bcopy transfer, whichever is smaller.
+ */
+static void gru_preload_tlb(struct gru_state *gru,
+			struct gru_thread_state *gts, int atomic,
+			unsigned long fault_vaddr, int asid, int write,
+			unsigned char tlb_preload_count,
+			struct gru_tlb_fault_handle *tfh,
+			struct gru_control_block_extended *cbe)
+{
+	unsigned long vaddr = 0, gpa;
+	int ret, pageshift;
+
+	if (cbe->opccpy != OP_BCOPY)
+		return;
+
+	if (fault_vaddr == cbe->cbe_baddr0)
+		vaddr = fault_vaddr + GRU_CACHE_LINE_BYTES * cbe->cbe_src_cl - 1;
+	else if (fault_vaddr == cbe->cbe_baddr1)
+		vaddr = fault_vaddr + (1 << cbe->xtypecpy) * cbe->cbe_nelemcur - 1;
+
+	fault_vaddr &= PAGE_MASK;
+	vaddr &= PAGE_MASK;
+	vaddr = min(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);
+
+	while (vaddr > fault_vaddr) {
+		ret = gru_vtop(gts, vaddr, write, atomic, &gpa, &pageshift);
+		if (ret || tfh_write_only(tfh, gpa, GAA_RAM, vaddr, asid, write,
+					  GRU_PAGESIZE(pageshift)))
+			return;
+		gru_dbg(grudev,
+			"%s: gid %d, gts 0x%p, tfh 0x%p, vaddr 0x%lx, asid 0x%x, rw %d, ps %d, gpa 0x%lx\n",
+			atomic ? "atomic" : "non-atomic", gru->gs_gid, gts, tfh,
+			vaddr, asid, write, pageshift, gpa);
+		vaddr -= PAGE_SIZE;
+		STAT(tlb_preload_page);
+	}
+}
+
+/*
  * Drop a TLB entry into the GRU. The fault is described by info in an TFH.
  * Input:
  *	cb	Address of user CBR. Null if not running in user context
@@ -303,6 +358,8 @@ static int gru_try_dropin(struct gru_thread_state *gts,
 			  struct gru_tlb_fault_handle *tfh,
 			  struct gru_instruction_bits *cbk)
 {
+	struct gru_control_block_extended *cbe = NULL;
+	unsigned char tlb_preload_count = gts->ts_tlb_preload_count;
 	int pageshift = 0, asid, write, ret, atomic = !cbk, indexway;
 	unsigned long gpa = 0, vaddr = 0;
 
@@ -314,6 +371,14 @@ static int gru_try_dropin(struct gru_thread_state *gts,
 	 */
 
 	/*
+	 * Prefetch the CBE if doing TLB preloading
+	 */
+	if (unlikely(tlb_preload_count)) {
+		cbe = gru_tfh_to_cbe(tfh);
+		prefetchw(cbe);
+	}
+
+	/*
 	 * Error if TFH state is IDLE or FMM mode & the user issuing a UPM call.
 	 * Might be a hardware race OR a stupid user. Ignore FMM because FMM
 	 * is a transient state.
@@ -359,6 +424,12 @@ static int gru_try_dropin(struct gru_thread_state *gts,
 			goto failupm;
 		}
 	}
+
+	if (unlikely(cbe) && pageshift == PAGE_SHIFT) {
+		gru_preload_tlb(gts->ts_gru, gts, atomic, vaddr, asid, write, tlb_preload_count, tfh, cbe);
+		gru_flush_cache_cbe(cbe);
+	}
+
 	gru_cb_set_istatus_active(cbk);
 	tfh_write_restart(tfh, gpa, GAA_RAM, vaddr, asid, write,
 			  GRU_PAGESIZE(pageshift));
@@ -378,11 +449,13 @@ failnoasid:
 		tfh_user_polling_mode(tfh);
 	else
 		gru_flush_cache(tfh);
+	gru_flush_cache_cbe(cbe);
 	return -EAGAIN;
 
 failupm:
 	/* Atomic failure switch CBR to UPM */
 	tfh_user_polling_mode(tfh);
+	gru_flush_cache_cbe(cbe);
 	STAT(tlb_dropin_fail_upm);
 	gru_dbg(grudev, "FAILED upm tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
 	return 1;
@@ -390,6 +463,7 @@ failupm:
 failfmm:
 	/* FMM state on UPM call */
 	gru_flush_cache(tfh);
+	gru_flush_cache_cbe(cbe);
 	STAT(tlb_dropin_fail_fmm);
 	gru_dbg(grudev, "FAILED fmm tfh: 0x%p, state %d\n", tfh, tfh->state);
 	return 0;
@@ -397,6 +471,7 @@ failfmm:
 failnoexception:
 	/* TFH status did not show exception pending */
 	gru_flush_cache(tfh);
+	gru_flush_cache_cbe(cbe);
 	if (cbk)
 		gru_flush_cache(cbk);
 	STAT(tlb_dropin_fail_no_exception);
@@ -407,6 +482,7 @@ failnoexception:
 failidle:
 	/* TFH state was idle - no miss pending */
 	gru_flush_cache(tfh);
+	gru_flush_cache_cbe(cbe);
 	if (cbk)
 		gru_flush_cache(cbk);
 	STAT(tlb_dropin_fail_idle);
@@ -416,6 +492,7 @@ failidle:
 failinval:
 	/* All errors (atomic & non-atomic) switch CBR to EXCEPTION state */
 	tfh_exception(tfh);
+	gru_flush_cache_cbe(cbe);
 	STAT(tlb_dropin_fail_invalid);
 	gru_dbg(grudev, "FAILED inval tfh: 0x%p, vaddr 0x%lx\n", tfh, vaddr);
 	return -EFAULT;
@@ -426,6 +503,7 @@ failactive:
 		tfh_user_polling_mode(tfh);
 	else
 		gru_flush_cache(tfh);
+	gru_flush_cache_cbe(cbe);
 	STAT(tlb_dropin_fail_range_active);
 	gru_dbg(grudev, "FAILED range active: tfh 0x%p, vaddr 0x%lx\n",
 		tfh, vaddr);
@@ -627,7 +705,7 @@ int gru_get_exception_detail(unsigned long arg)
 		excdet.exceptdet1 = cbe->idef3upd;
 		excdet.cbrstate = cbe->cbrstate;
 		excdet.cbrexecstatus = cbe->cbrexecstatus;
-		gru_flush_cache(cbe);
+		gru_flush_cache_cbe(cbe);
 		ret = 0;
 	} else {
 		ret = -EAGAIN;
@@ -770,9 +848,12 @@ int gru_set_context_option(unsigned long arg)
 		return -EFAULT;
 	gru_dbg(grudev, "op %d, gseg 0x%lx, value1 0x%lx\n", req.op, req.gseg, req.val1);
 
-	gts = gru_alloc_locked_gts(req.gseg);
-	if (IS_ERR(gts))
-		return PTR_ERR(gts);
+	gts = gru_find_lock_gts(req.gseg);
+	if (!gts) {
+		gts = gru_alloc_locked_gts(req.gseg);
+		if (IS_ERR(gts))
+			return PTR_ERR(gts);
+	}
 
 	switch (req.op) {
 	case sco_blade_chiplet:
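
The preload window arithmetic in gru_preload_tlb() is easiest to see in isolation. The following standalone sketch mirrors only the clamping and the backwards page walk; the 4 KB page constants and the min_ul() helper are assumptions for illustration and are not driver code.

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long fault_vaddr = 0x7f0000001000UL;	/* faulting address */
	unsigned long xfer_bytes  = 10 * PAGE_SIZE + 100;	/* rest of the BCOPY */
	unsigned char tlb_preload_count = 4;		/* user-requested limit */

	/* last byte of the transfer, as in the cbe_baddr0/cbe_baddr1 cases */
	unsigned long vaddr = fault_vaddr + xfer_bytes - 1;

	fault_vaddr &= PAGE_MASK;
	vaddr &= PAGE_MASK;
	vaddr = min_ul(vaddr, fault_vaddr + tlb_preload_count * PAGE_SIZE);

	/* walk backwards toward the faulting page, one dropin per page */
	for (; vaddr > fault_vaddr; vaddr -= PAGE_SIZE)
		printf("would preload TLB entry for 0x%lx\n", vaddr);
	return 0;
}

With the values above the loop stops after four pages: the window is bounded by whichever is smaller, the end of the transfer or tlb_preload_count pages past the fault.
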
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index 9d41208a6c92..cb3b4d228475 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -152,6 +152,7 @@ static int gru_create_new_context(unsigned long arg)
 		vdata->vd_dsr_au_count =
 			GRU_DS_BYTES_TO_AU(req.data_segment_bytes);
 		vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks);
+		vdata->vd_tlb_preload_count = req.tlb_preload_count;
 		ret = 0;
 	}
 	up_write(&current->mm->mmap_sem);
diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
index 66d67d9bc9b6..2f30badc6ffd 100644
--- a/drivers/misc/sgi-gru/gruhandles.c
+++ b/drivers/misc/sgi-gru/gruhandles.c
@@ -165,17 +165,20 @@ int tgh_invalidate(struct gru_tlb_global_handle *tgh,
 	return wait_instruction_complete(tgh, tghop_invalidate);
 }
 
-void tfh_write_only(struct gru_tlb_fault_handle *tfh,
-		unsigned long pfn, unsigned long vaddr,
-		int asid, int dirty, int pagesize)
+int tfh_write_only(struct gru_tlb_fault_handle *tfh,
+		unsigned long paddr, int gaa,
+		unsigned long vaddr, int asid, int dirty,
+		int pagesize)
 {
 	tfh->fillasid = asid;
 	tfh->fillvaddr = vaddr;
-	tfh->pfn = pfn;
+	tfh->pfn = paddr >> GRU_PADDR_SHIFT;
+	tfh->gaa = gaa;
 	tfh->dirty = dirty;
 	tfh->pagesize = pagesize;
 	tfh->opc = TFHOP_WRITE_ONLY;
 	start_instruction(tfh);
+	return wait_instruction_complete(tfh, tfhop_write_only);
 }
 
 void tfh_write_restart(struct gru_tlb_fault_handle *tfh,
diff --git a/drivers/misc/sgi-gru/gruhandles.h b/drivers/misc/sgi-gru/gruhandles.h
index 47b762f89e0d..ea584ebf65b1 100644
--- a/drivers/misc/sgi-gru/gruhandles.h
+++ b/drivers/misc/sgi-gru/gruhandles.h
@@ -164,6 +164,16 @@ static inline void *gru_chiplet_vaddr(void *vaddr, int pnode, int chiplet)
 	return vaddr + GRU_SIZE * (2 * pnode + chiplet);
 }
 
+static inline struct gru_control_block_extended *gru_tfh_to_cbe(
+					struct gru_tlb_fault_handle *tfh)
+{
+	unsigned long cbe;
+
+	cbe = (unsigned long)tfh - GRU_TFH_BASE + GRU_CBE_BASE;
+	return (struct gru_control_block_extended*)cbe;
+}
+
+
 
 
 /*
@@ -446,6 +456,12 @@ struct gru_control_block_extended {
 	unsigned int cbrexecstatus:8;
 };
 
+/* CBE fields for active BCOPY instructions */
+#define cbe_baddr0	idef1upd
+#define cbe_baddr1	idef3upd
+#define cbe_src_cl	idef6cpy
+#define cbe_nelemcur	idef5upd
+
 enum gru_cbr_state {
 	CBRSTATE_INACTIVE,
 	CBRSTATE_IDLE,
@@ -493,8 +509,8 @@ int cch_interrupt_sync(struct gru_context_configuration_handle *cch);
 int tgh_invalidate(struct gru_tlb_global_handle *tgh, unsigned long vaddr,
 	unsigned long vaddrmask, int asid, int pagesize, int global, int n,
 	unsigned short ctxbitmap);
-void tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long pfn,
-	unsigned long vaddr, int asid, int dirty, int pagesize);
+int tfh_write_only(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
+	int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
 void tfh_write_restart(struct gru_tlb_fault_handle *tfh, unsigned long paddr,
 	int gaa, unsigned long vaddr, int asid, int dirty, int pagesize);
 void tfh_restart(struct gru_tlb_fault_handle *tfh);
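
The new gru_tfh_to_cbe() helper relies on a context's TFH and CBE handles sitting at the same offset within parallel handle regions, so converting one to the other is a pointer rebase. A minimal user-space sketch of that arithmetic, with made-up base values standing in for GRU_TFH_BASE and GRU_CBE_BASE:

#include <stdint.h>
#include <stdio.h>

#define TFH_BASE 0x1000UL	/* hypothetical stand-in for GRU_TFH_BASE */
#define CBE_BASE 0x3000UL	/* hypothetical stand-in for GRU_CBE_BASE */

/* keep the offset within the region, swap the region base */
static uintptr_t tfh_to_cbe(uintptr_t tfh)
{
	return tfh - TFH_BASE + CBE_BASE;
}

int main(void)
{
	uintptr_t tfh = TFH_BASE + 0x240;	/* some handle inside the TFH region */

	printf("tfh 0x%lx -> cbe 0x%lx\n",
	       (unsigned long)tfh, (unsigned long)tfh_to_cbe(tfh));
	return 0;
}
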
diff --git a/drivers/misc/sgi-gru/grukservices.c b/drivers/misc/sgi-gru/grukservices.c
index 4da6f56833d1..d9ff0289a1c3 100644
--- a/drivers/misc/sgi-gru/grukservices.c
+++ b/drivers/misc/sgi-gru/grukservices.c
@@ -161,7 +161,7 @@ static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
 	down_write(&bs->bs_kgts_sema);
 
 	if (!bs->bs_kgts) {
-		bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0);
+		bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0, 0);
 		bs->bs_kgts->ts_user_blade_id = blade_id;
 	}
 	kgts = bs->bs_kgts;
diff --git a/drivers/misc/sgi-gru/grulib.h b/drivers/misc/sgi-gru/grulib.h
index e033b36df7e0..c6928af7393a 100644
--- a/drivers/misc/sgi-gru/grulib.h
+++ b/drivers/misc/sgi-gru/grulib.h
@@ -86,6 +86,7 @@ struct gru_create_context_req {
 	unsigned int	control_blocks;
 	unsigned int	maximum_thread_count;
 	unsigned int	options;
+	unsigned char	tlb_preload_count;
 };
 
 /*
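
For context, tlb_preload_count is the knob user code sets when creating a context; grufile.c copies it into the vma data and grumain.c threads it into the gts. A hedged sketch of the user side follows; it mirrors only the request fields visible in this patch (the full struct layout and the ioctl that submits it are omitted), and the struct name is marked as a sketch, not the real header type.

#include <stdio.h>

/* partial mirror of struct gru_create_context_req: fields visible in this patch only */
struct gru_create_context_req_sketch {
	unsigned int	data_segment_bytes;
	unsigned int	control_blocks;
	unsigned int	maximum_thread_count;
	unsigned int	options;
	unsigned char	tlb_preload_count;
};

int main(void)
{
	struct gru_create_context_req_sketch req = {
		.data_segment_bytes	= 8192,
		.control_blocks		= 16,
		.maximum_thread_count	= 1,
		.options		= 0,
		.tlb_preload_count	= 4,	/* ask for up to 4 preloaded pages per dropin */
	};

	printf("requesting tlb_preload_count=%u\n", req.tlb_preload_count);
	return 0;
}
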
diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c
index ebabbdcbb97f..ade0925eab0e 100644
--- a/drivers/misc/sgi-gru/grumain.c
+++ b/drivers/misc/sgi-gru/grumain.c
@@ -316,7 +316,8 @@ static struct gru_thread_state *gru_find_current_gts_nolock(struct gru_vma_data
  * Allocate a thread state structure.
  */
 struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
-		int cbr_au_count, int dsr_au_count, int options, int tsid)
+		int cbr_au_count, int dsr_au_count,
+		unsigned char tlb_preload_count, int options, int tsid)
 {
 	struct gru_thread_state *gts;
 	struct gru_mm_struct *gms;
@@ -334,6 +335,7 @@ struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
 	mutex_init(&gts->ts_ctxlock);
 	gts->ts_cbr_au_count = cbr_au_count;
 	gts->ts_dsr_au_count = dsr_au_count;
+	gts->ts_tlb_preload_count = tlb_preload_count;
 	gts->ts_user_options = options;
 	gts->ts_user_blade_id = -1;
 	gts->ts_user_chiplet_id = -1;
@@ -403,7 +405,9 @@ struct gru_thread_state *gru_alloc_thread_state(struct vm_area_struct *vma,
 	struct gru_vma_data *vdata = vma->vm_private_data;
 	struct gru_thread_state *gts, *ngts;
 
-	gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count, vdata->vd_dsr_au_count,
+	gts = gru_alloc_gts(vma, vdata->vd_cbr_au_count,
+			    vdata->vd_dsr_au_count,
+			    vdata->vd_tlb_preload_count,
 			    vdata->vd_user_options, tsid);
 	if (IS_ERR(gts))
 		return gts;
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 0a57ab29cd30..54a5a1c35ad1 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -76,6 +76,7 @@ static int statistics_show(struct seq_file *s, void *p)
 	printstat(s, check_context_retarget_intr);
 	printstat(s, check_context_unload);
 	printstat(s, tlb_dropin);
+	printstat(s, tlb_preload_page);
 	printstat(s, tlb_dropin_fail_no_asid);
 	printstat(s, tlb_dropin_fail_upm);
 	printstat(s, tlb_dropin_fail_invalid);
@@ -127,7 +128,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
 	int op;
 	unsigned long total, count, max;
 	static char *id[] = {"cch_allocate", "cch_start", "cch_interrupt",
-		"cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
+		"cch_interrupt_sync", "cch_deallocate", "tfh_write_only",
+		"tfh_write_restart", "tgh_invalidate"};
 
 	seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
 	for (op = 0; op < mcsop_last; op++) {
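
The id[] table in mcs_statistics_show() must stay in the same order as enum mcs_op in grutables.h; this patch therefore inserts the two tfh entries in both places. A small sketch of one way to make that coupling explicit with designated initializers follows (illustrative only, not how the driver writes it):

#include <stdio.h>

enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
	cchop_deallocate, tfhop_write_only, tfhop_write_restart,
	tghop_invalidate, mcsop_last};

/* designated initializers pin each name to its enum value */
static const char *id[mcsop_last] = {
	[cchop_allocate]	= "cch_allocate",
	[cchop_start]		= "cch_start",
	[cchop_interrupt]	= "cch_interrupt",
	[cchop_interrupt_sync]	= "cch_interrupt_sync",
	[cchop_deallocate]	= "cch_deallocate",
	[tfhop_write_only]	= "tfh_write_only",
	[tfhop_write_restart]	= "tfh_write_restart",
	[tghop_invalidate]	= "tgh_invalidate",
};

int main(void)
{
	for (int op = 0; op < mcsop_last; op++)
		printf("%-20s\n", id[op]);
	return 0;
}
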
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index 76fe2987fc9f..adaf691d59f5 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -202,6 +202,7 @@ struct gru_stats_s {
 	atomic_long_t check_context_retarget_intr;
 	atomic_long_t check_context_unload;
 	atomic_long_t tlb_dropin;
+	atomic_long_t tlb_preload_page;
 	atomic_long_t tlb_dropin_fail_no_asid;
 	atomic_long_t tlb_dropin_fail_upm;
 	atomic_long_t tlb_dropin_fail_invalid;
@@ -245,7 +246,8 @@ struct gru_stats_s {
 };
 
 enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
-	cchop_deallocate, tghop_invalidate, mcsop_last};
+	cchop_deallocate, tfhop_write_only, tfhop_write_restart,
+	tghop_invalidate, mcsop_last};
 
 struct mcs_op_statistic {
 	atomic_long_t count;
@@ -335,6 +337,7 @@ struct gru_vma_data {
 	long		vd_user_options;/* misc user option flags */
 	int		vd_cbr_au_count;
 	int		vd_dsr_au_count;
+	unsigned char	vd_tlb_preload_count;
 };
 
 /*
@@ -350,6 +353,7 @@ struct gru_thread_state {
 	struct gru_state *ts_gru;	/* GRU where the context is
					   loaded */
 	struct gru_mm_struct *ts_gms;	/* asid & ioproc struct */
+	unsigned char	ts_tlb_preload_count; /* TLB preload pages */
 	unsigned long	ts_cbr_map;	/* map of allocated CBRs */
 	unsigned long	ts_dsr_map;	/* map of allocated DATA
					   resources */
@@ -661,7 +665,8 @@ extern int gru_proc_init(void);
 extern void gru_proc_exit(void);
 
 extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma,
-		int cbr_au_count, int dsr_au_count, int options, int tsid);
+		int cbr_au_count, int dsr_au_count,
+		unsigned char tlb_preload_count, int options, int tsid);
 extern unsigned long gru_reserve_cb_resources(struct gru_state *gru,
 		int cbr_au_count, char *cbmap);
 extern unsigned long gru_reserve_ds_resources(struct gru_state *gru,
