author | Ralph Campbell <ralph.campbell@qlogic.com> | 2008-04-17 00:09:29 -0400 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2008-04-17 00:09:29 -0400 |
commit | 9355fb6a064723c71e80e9c78de3140b43bfb52d (patch) | |
tree | dd0fffeb6633aed6cb2c946a05bf33e05f2e9436 /drivers/infiniband/hw/ipath/ipath_init_chip.c | |
parent | 2ba3f56eb402672ff83601b5990b219d39577636 (diff) | |
IB/ipath: Add support for 7220 receive queue changes
Newer HCAs have a HW option to write a sequence number to each receive
queue entry and avoid a separate DMA of the tail register to memory.
This patch adds support for these changes.
Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
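The idea behind the change is easiest to see outside the driver: in the older scheme, software learns that new receive entries have arrived by reading a tail index that the HCA must DMA into host memory, while in the 7220-style scheme the HCA writes a sequence number into each receive queue entry, so software can tell that an entry is valid by looking at the entry itself. The sketch below illustrates that contrast in plain C; it is not the ipath driver's code, and the struct layout, field names, and helper names are invented for illustration.

```c
/*
 * Illustrative sketch only -- not the ipath driver's actual code, types, or
 * register layout.  It contrasts the two completion-detection schemes the
 * commit message describes: polling a tail index that the HCA DMAs to host
 * memory versus checking a sequence number the HCA writes into each entry.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rcv_entry {
	uint32_t payload[15];
	uint32_t seq;		/* written by HW when the entry becomes valid */
};

/* Old scheme: HW DMAs the queue tail to host memory; SW compares head vs. tail. */
static bool ready_by_dma_tail(const volatile uint32_t *dma_tail, uint32_t head)
{
	return head != *dma_tail;	/* costs a DMA write for every tail update */
}

/* New scheme: each entry carries the expected sequence number; no tail DMA. */
static bool ready_by_seq(const volatile struct rcv_entry *e, uint32_t expected)
{
	return e->seq == expected;	/* validity is read from the entry itself */
}

int main(void)
{
	uint32_t tail = 3;			/* pretend HW advanced the tail index */
	struct rcv_entry e = { .seq = 1 };	/* pretend HW stamped sequence 1 */

	printf("dma-tail says ready: %d\n", ready_by_dma_tail(&tail, 2));
	printf("seq-num  says ready: %d\n", ready_by_seq(&e, 1));
	return 0;
}
```

In the patch itself this shows up as the new IPATH_NODMA_RTAIL flag and the per-port port_seq_cnt field: when the flag is set, the driver no longer enables tail-update DMA (the ipath_r_tailupd_shift bit in rcvctrl) and relies on the per-entry sequence counter instead.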
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_init_chip.c')
-rw-r--r-- | drivers/infiniband/hw/ipath/ipath_init_chip.c | 152 |
1 file changed, 84 insertions, 68 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_init_chip.c b/drivers/infiniband/hw/ipath/ipath_init_chip.c
index 94f938f5d5c3..720ff4df84eb 100644
--- a/drivers/infiniband/hw/ipath/ipath_init_chip.c
+++ b/drivers/infiniband/hw/ipath/ipath_init_chip.c
@@ -219,14 +219,14 @@ static struct ipath_portdata *create_portdata0(struct ipath_devdata *dd)
 		pd->port_cnt = 1;
 		/* The port 0 pkey table is used by the layer interface. */
 		pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY;
+		pd->port_seq_cnt = 1;
 	}
 	return pd;
 }
 
-static int init_chip_first(struct ipath_devdata *dd,
-			   struct ipath_portdata **pdp)
+static int init_chip_first(struct ipath_devdata *dd)
 {
-	struct ipath_portdata *pd = NULL;
+	struct ipath_portdata *pd;
 	int ret = 0;
 	u64 val;
 
@@ -242,12 +242,14 @@ static int init_chip_first(struct ipath_devdata *dd,
 	else if (ipath_cfgports <= dd->ipath_portcnt) {
 		dd->ipath_cfgports = ipath_cfgports;
 		ipath_dbg("Configured to use %u ports out of %u in chip\n",
-			  dd->ipath_cfgports, dd->ipath_portcnt);
+			  dd->ipath_cfgports, ipath_read_kreg32(dd,
+			  dd->ipath_kregs->kr_portcnt));
 	} else {
 		dd->ipath_cfgports = dd->ipath_portcnt;
 		ipath_dbg("Tried to configured to use %u ports; chip "
 			  "only supports %u\n", ipath_cfgports,
-			  dd->ipath_portcnt);
+			  ipath_read_kreg32(dd,
+			  dd->ipath_kregs->kr_portcnt));
 	}
 	/*
 	 * Allocate full portcnt array, rather than just cfgports, because
@@ -324,36 +326,39 @@ static int init_chip_first(struct ipath_devdata *dd,
 	mutex_init(&dd->ipath_eep_lock);
 
 done:
-	*pdp = pd;
 	return ret;
 }
 
 /**
  * init_chip_reset - re-initialize after a reset, or enable
  * @dd: the infinipath device
- * @pdp: output for port data
  *
  * sanity check at least some of the values after reset, and
 * ensure no receive or transmit (explictly, in case reset
 * failed
 */
-static int init_chip_reset(struct ipath_devdata *dd,
-			   struct ipath_portdata **pdp)
+static int init_chip_reset(struct ipath_devdata *dd)
 {
 	u32 rtmp;
+	int i;
+
+	/*
+	 * ensure chip does no sends or receives, tail updates, or
+	 * pioavail updates while we re-initialize
+	 */
+	dd->ipath_rcvctrl &= ~(1ULL << dd->ipath_r_tailupd_shift);
+	for (i = 0; i < dd->ipath_portcnt; i++) {
+		clear_bit(dd->ipath_r_portenable_shift + i,
+			  &dd->ipath_rcvctrl);
+		clear_bit(dd->ipath_r_intravail_shift + i,
+			  &dd->ipath_rcvctrl);
+	}
+	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
+			dd->ipath_rcvctrl);
 
-	*pdp = dd->ipath_pd[0];
-	/* ensure chip does no sends or receives while we re-initialize */
-	dd->ipath_control = dd->ipath_sendctrl = dd->ipath_rcvctrl = 0U;
-	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl, dd->ipath_rcvctrl);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_control, dd->ipath_control);
 
-	rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_portcnt);
-	if (dd->ipath_portcnt != rtmp)
-		dev_info(&dd->pcidev->dev, "portcnt was %u before "
-			 "reset, now %u, using original\n",
-			 dd->ipath_portcnt, rtmp);
 	rtmp = ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
 	if (rtmp != dd->ipath_rcvtidcnt)
 		dev_info(&dd->pcidev->dev, "tidcnt was %u before "
@@ -456,10 +461,10 @@ static void init_shadow_tids(struct ipath_devdata *dd)
 	dd->ipath_physshadow = addrs;
 }
 
-static void enable_chip(struct ipath_devdata *dd,
-			struct ipath_portdata *pd, int reinit)
+static void enable_chip(struct ipath_devdata *dd, int reinit)
 {
 	u32 val;
+	u64 rcvmask;
 	unsigned long flags;
 	int i;
 
@@ -478,12 +483,15 @@ static void enable_chip(struct ipath_devdata *dd,
 	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
 
 	/*
-	 * enable port 0 receive, and receive interrupt. other ports
-	 * done as user opens and inits them.
+	 * Enable kernel ports' receive and receive interrupt.
+	 * Other ports done as user opens and inits them.
 	 */
-	dd->ipath_rcvctrl = (1ULL << dd->ipath_r_tailupd_shift) |
-		(1ULL << dd->ipath_r_portenable_shift) |
-		(1ULL << dd->ipath_r_intravail_shift);
+	rcvmask = 1ULL;
+	dd->ipath_rcvctrl |= (rcvmask << dd->ipath_r_portenable_shift) |
+		(rcvmask << dd->ipath_r_intravail_shift);
+	if (!(dd->ipath_flags & IPATH_NODMA_RTAIL))
+		dd->ipath_rcvctrl |= (1ULL << dd->ipath_r_tailupd_shift);
+
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
 			 dd->ipath_rcvctrl);
 
@@ -494,8 +502,8 @@ static void enable_chip(struct ipath_devdata *dd,
 	dd->ipath_flags |= IPATH_INITTED;
 
 	/*
-	 * init our shadow copies of head from tail values, and write
-	 * head values to match.
+	 * Init our shadow copies of head from tail values,
+	 * and write head values to match.
 	 */
 	val = ipath_read_ureg32(dd, ur_rcvegrindextail, 0);
 	ipath_write_ureg(dd, ur_rcvegrindexhead, val, 0);
@@ -529,8 +537,7 @@ static void enable_chip(struct ipath_devdata *dd,
 	dd->ipath_flags |= IPATH_PRESENT;
 }
 
-static int init_housekeeping(struct ipath_devdata *dd,
-			     struct ipath_portdata **pdp, int reinit)
+static int init_housekeeping(struct ipath_devdata *dd, int reinit)
 {
 	char boardn[32];
 	int ret = 0;
@@ -591,18 +598,9 @@ static int init_housekeeping(struct ipath_devdata *dd,
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
 			 INFINIPATH_E_RESET);
 
-	if (reinit)
-		ret = init_chip_reset(dd, pdp);
-	else
-		ret = init_chip_first(dd, pdp);
-
-	if (ret)
-		goto done;
-
-	ipath_cdbg(VERBOSE, "Revision %llx (PCI %x), %u ports, %u tids, "
-		   "%u egrtids\n", (unsigned long long) dd->ipath_revision,
-		   dd->ipath_pcirev, dd->ipath_portcnt, dd->ipath_rcvtidcnt,
-		   dd->ipath_rcvegrcnt);
+	ipath_cdbg(VERBOSE, "Revision %llx (PCI %x)\n",
+		   (unsigned long long) dd->ipath_revision,
+		   dd->ipath_pcirev);
 
 	if (((dd->ipath_revision >> INFINIPATH_R_SOFTWARE_SHIFT) &
 	    INFINIPATH_R_SOFTWARE_MASK) != IPATH_CHIP_SWVERSION) {
@@ -641,6 +639,14 @@ static int init_housekeeping(struct ipath_devdata *dd,
 
 	ipath_dbg("%s", dd->ipath_boardversion);
 
+	if (ret)
+		goto done;
+
+	if (reinit)
+		ret = init_chip_reset(dd);
+	else
+		ret = init_chip_first(dd);
+
 done:
 	return ret;
 }
@@ -666,11 +672,11 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 	u32 val32, kpiobufs;
 	u32 piobufs, uports;
 	u64 val;
-	struct ipath_portdata *pd = NULL; /* keep gcc4 happy */
+	struct ipath_portdata *pd;
 	gfp_t gfp_flags = GFP_USER | __GFP_COMP;
 	unsigned long flags;
 
-	ret = init_housekeeping(dd, &pd, reinit);
+	ret = init_housekeeping(dd, reinit);
 	if (ret)
 		goto done;
 
@@ -690,7 +696,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 	 * we now use routines that backend onto __get_free_pages, the
 	 * rest would be wasted.
 	 */
-	dd->ipath_rcvhdrcnt = dd->ipath_rcvegrcnt;
+	dd->ipath_rcvhdrcnt = max(dd->ipath_p0_rcvegrcnt, dd->ipath_rcvegrcnt);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrcnt,
 			 dd->ipath_rcvhdrcnt);
 
@@ -721,8 +727,8 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 	if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) {
 		int i = (int) piobufs -
 			(int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
-		if (i < 0)
-			i = 0;
+		if (i < 1)
+			i = 1;
 		dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of "
 			 "%d for kernel leaves too few for %d user ports "
 			 "(%d each); using %u\n", kpiobufs,
@@ -741,6 +747,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 		ipath_dbg("allocating %u pbufs/port leaves %u unused, "
 			  "add to kernel\n", dd->ipath_pbufsport, val32);
 		dd->ipath_lastport_piobuf -= val32;
+		kpiobufs += val32;
 		ipath_dbg("%u pbufs/port leaves %u unused, add to kernel\n",
 			  dd->ipath_pbufsport, val32);
 	}
@@ -759,8 +766,10 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 	 */
 	ipath_cancel_sends(dd, 0);
 
-	/* early_init sets rcvhdrentsize and rcvhdrsize, so this must be
-	 * done after early_init */
+	/*
+	 * Early_init sets rcvhdrentsize and rcvhdrsize, so this must be
+	 * done after early_init.
+	 */
 	dd->ipath_hdrqlast =
 		dd->ipath_rcvhdrentsize * (dd->ipath_rcvhdrcnt - 1);
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvhdrentsize,
@@ -835,58 +844,65 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
 	/* enable errors that are masked, at least this first time. */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
 			 ~dd->ipath_maskederrs);
-	dd->ipath_errormask = ipath_read_kreg64(dd,
-		dd->ipath_kregs->kr_errormask);
+	dd->ipath_maskederrs = 0; /* don't re-enable ignored in timer */
+	dd->ipath_errormask =
+		ipath_read_kreg64(dd, dd->ipath_kregs->kr_errormask);
 	/* clear any interrupts up to this point (ints still not enabled) */
 	ipath_write_kreg(dd, dd->ipath_kregs->kr_intclear, -1LL);
 
+	dd->ipath_f_tidtemplate(dd);
+
 	/*
 	 * Set up the port 0 (kernel) rcvhdr q and egr TIDs. If doing
 	 * re-init, the simplest way to handle this is to free
 	 * existing, and re-allocate.
 	 * Need to re-create rest of port 0 portdata as well.
 	 */
+	pd = dd->ipath_pd[0];
 	if (reinit) {
-		/* Alloc and init new ipath_portdata for port0,
+		struct ipath_portdata *npd;
+
+		/*
+		 * Alloc and init new ipath_portdata for port0,
 		 * Then free old pd. Could lead to fragmentation, but also
 		 * makes later support for hot-swap easier.
 		 */
-		struct ipath_portdata *npd;
 		npd = create_portdata0(dd);
 		if (npd) {
 			ipath_free_pddata(dd, pd);
-			dd->ipath_pd[0] = pd = npd;
+			dd->ipath_pd[0] = npd;
+			pd = npd;
 		} else {
-			ipath_dev_err(dd, "Unable to allocate portdata for"
-				      " port 0, failing\n");
+			ipath_dev_err(dd, "Unable to allocate portdata"
+				      " for port 0, failing\n");
 			ret = -ENOMEM;
 			goto done;
 		}
 	}
-	dd->ipath_f_tidtemplate(dd);
 	ret = ipath_create_rcvhdrq(dd, pd);
-	if (!ret) {
-		dd->ipath_hdrqtailptr =
-			(volatile __le64 *)pd->port_rcvhdrtail_kvaddr;
+	if (!ret)
 		ret = create_port0_egr(dd);
-	}
-	if (ret)
-		ipath_dev_err(dd, "failed to allocate port 0 (kernel) "
+	if (ret) {
+		ipath_dev_err(dd, "failed to allocate kernel port's "
 			      "rcvhdrq and/or egr bufs\n");
+		goto done;
+	}
 	else
-		enable_chip(dd, pd, reinit);
+		enable_chip(dd, reinit);
 
-
-	if (!ret && !reinit) {
-		/* used when we close a port, for DMA already in flight at close */
+	if (!reinit) {
+		/*
+		 * Used when we close a port, for DMA already in flight
+		 * at close.
+		 */
 		dd->ipath_dummy_hdrq = dma_alloc_coherent(
-			&dd->pcidev->dev, pd->port_rcvhdrq_size,
+			&dd->pcidev->dev, dd->ipath_pd[0]->port_rcvhdrq_size,
 			&dd->ipath_dummy_hdrq_phys,
 			gfp_flags);
 		if (!dd->ipath_dummy_hdrq) {
 			dev_info(&dd->pcidev->dev,
 				"Couldn't allocate 0x%lx bytes for dummy hdrq\n",
-				pd->port_rcvhdrq_size);
+				dd->ipath_pd[0]->port_rcvhdrq_size);
 			/* fallback to just 0'ing */
 			dd->ipath_dummy_hdrq_phys = 0UL;
 		}