Diffstat (limited to 'arch/powerpc/kernel/process.c')
 arch/powerpc/kernel/process.c | 225 ++++++++++++++++++++++++++++++++++-------
 1 file changed, 190 insertions(+), 35 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index a0c74bbf3454..bfdd783e3916 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -77,6 +77,13 @@
 extern unsigned long _get_SP(void);
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+/*
+ * Are we running in "Suspend disabled" mode? If so we have to block any
+ * sigreturn that would get us into suspended state, and we also warn in some
+ * other paths that we should never reach with suspend disabled.
+ */
+bool tm_suspend_disabled __ro_after_init = false;
+
 static void check_if_tm_restore_required(struct task_struct *tsk)
 {
 	/*
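(The consumers of tm_suspend_disabled live outside this file; a sigreturn-side check would look roughly like the sketch below. Its placement and surrounding code are illustrative assumptions, not part of this patch.)

	/* Illustrative only: refuse a sigreturn that would move the
	 * thread into transactional-suspended state while suspend is
	 * disabled. */
	if (tm_suspend_disabled && MSR_TM_SUSPENDED(msr))
		return -EINVAL;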
@@ -97,9 +104,23 @@ static inline bool msr_tm_active(unsigned long msr)
 {
 	return MSR_TM_ACTIVE(msr);
 }
+
+static bool tm_active_with_fp(struct task_struct *tsk)
+{
+	return msr_tm_active(tsk->thread.regs->msr) &&
+		(tsk->thread.ckpt_regs.msr & MSR_FP);
+}
+
+static bool tm_active_with_altivec(struct task_struct *tsk)
+{
+	return msr_tm_active(tsk->thread.regs->msr) &&
+		(tsk->thread.ckpt_regs.msr & MSR_VEC);
+}
 #else
 static inline bool msr_tm_active(unsigned long msr) { return false; }
 static inline void check_if_tm_restore_required(struct task_struct *tsk) { }
+static inline bool tm_active_with_fp(struct task_struct *tsk) { return false; }
+static inline bool tm_active_with_altivec(struct task_struct *tsk) { return false; }
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
 bool strict_msr_control;
@@ -232,7 +253,7 @@ EXPORT_SYMBOL(enable_kernel_fp);
 
 static int restore_fp(struct task_struct *tsk)
 {
-	if (tsk->thread.load_fp || msr_tm_active(tsk->thread.regs->msr)) {
+	if (tsk->thread.load_fp || tm_active_with_fp(tsk)) {
 		load_fp_state(&current->thread.fp_state);
 		current->thread.load_fp++;
 		return 1;
@@ -314,7 +335,7 @@ EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 static int restore_altivec(struct task_struct *tsk)
 {
 	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
-	    (tsk->thread.load_vec || msr_tm_active(tsk->thread.regs->msr))) {
+	    (tsk->thread.load_vec || tm_active_with_altivec(tsk))) {
 		load_vr_state(&tsk->thread.vr_state);
 		tsk->thread.used_vr = 1;
 		tsk->thread.load_vec++;
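(Both helpers feed the lazy restore path: restore_fp() and restore_altivec() are called from restore_math() in this file. A condensed sketch of that caller, simplified from the surrounding kernel code with details elided:)

	/* Simplified: accumulate the facilities actually restored into
	 * the MSR the task will return to userspace with. */
	unsigned long msr = regs->msr;

	if (!(msr & MSR_FP) && restore_fp(current))
		msr |= MSR_FP;
	if (!(msr & MSR_VEC) && restore_altivec(current))
		msr |= MSR_VEC;
	regs->msr = msr;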
@@ -853,6 +874,10 @@ static void tm_reclaim_thread(struct thread_struct *thr,
 	if (!MSR_TM_SUSPENDED(mfmsr()))
 		return;
 
+	giveup_all(container_of(thr, struct task_struct, thread));
+
+	tm_reclaim(thr, cause);
+
 	/*
 	 * If we are in a transaction and FP is off then we can't have
 	 * used FP inside that transaction. Hence the checkpointed
@@ -871,10 +896,6 @@ static void tm_reclaim_thread(struct thread_struct *thr,
 	if ((thr->ckpt_regs.msr & MSR_VEC) == 0)
 		memcpy(&thr->ckvr_state, &thr->vr_state,
 		       sizeof(struct thread_vr_state));
-
-	giveup_all(container_of(thr, struct task_struct, thread));
-
-	tm_reclaim(thr, thr->ckpt_regs.msr, cause);
 }
 
 void tm_reclaim_current(uint8_t cause)
@@ -903,6 +924,8 @@ static inline void tm_reclaim_task(struct task_struct *tsk)
 	if (!MSR_TM_ACTIVE(thr->regs->msr))
 		goto out_and_saveregs;
 
+	WARN_ON(tm_suspend_disabled);
+
 	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
 		 "ccr=%lx, msr=%lx, trap=%lx)\n",
 		 tsk->pid, thr->regs->nip,
@@ -923,11 +946,9 @@ out_and_saveregs:
 	tm_save_sprs(thr);
 }
 
-extern void __tm_recheckpoint(struct thread_struct *thread,
-			      unsigned long orig_msr);
+extern void __tm_recheckpoint(struct thread_struct *thread);
 
-void tm_recheckpoint(struct thread_struct *thread,
-		     unsigned long orig_msr)
+void tm_recheckpoint(struct thread_struct *thread)
 {
 	unsigned long flags;
 
@@ -946,15 +967,13 @@ void tm_recheckpoint(struct thread_struct *thread,
 	 */
 	tm_restore_sprs(thread);
 
-	__tm_recheckpoint(thread, orig_msr);
+	__tm_recheckpoint(thread);
 
 	local_irq_restore(flags);
 }
 
 static inline void tm_recheckpoint_new_task(struct task_struct *new)
 {
-	unsigned long msr;
-
 	if (!cpu_has_feature(CPU_FTR_TM))
 		return;
 
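(With orig_msr gone, the reclaim/recheckpoint entry points no longer take a caller-supplied MSR, presumably because the low-level code now works from the checkpointed state itself. As this patch leaves them, the prototypes reduce to the following; the exact header they live in, such as asm/tm.h, is an assumption for illustration:)

	extern void tm_reclaim(struct thread_struct *thread, uint8_t cause);
	extern void __tm_recheckpoint(struct thread_struct *thread);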
@@ -973,13 +992,11 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
 		tm_restore_sprs(&new->thread);
 		return;
 	}
-	msr = new->thread.ckpt_regs.msr;
 	/* Recheckpoint to restore original checkpointed register state. */
-	TM_DEBUG("*** tm_recheckpoint of pid %d "
-		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
-		 new->pid, new->thread.regs->msr, msr);
+	TM_DEBUG("*** tm_recheckpoint of pid %d (new->msr 0x%lx)\n",
+		 new->pid, new->thread.regs->msr);
 
-	tm_recheckpoint(&new->thread, msr);
+	tm_recheckpoint(&new->thread);
 
 	/*
 	 * The checkpointed state has been restored but the live state has
@@ -1119,6 +1136,10 @@ static inline void restore_sprs(struct thread_struct *old_thread,
 		if (old_thread->tar != new_thread->tar)
 			mtspr(SPRN_TAR, new_thread->tar);
 	}
+
+	if (cpu_has_feature(CPU_FTR_ARCH_300) &&
+	    old_thread->tidr != new_thread->tidr)
+		mtspr(SPRN_TIDR, new_thread->tidr);
 #endif
 }
 
@@ -1155,7 +1176,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 	}
 #endif /* CONFIG_PPC64 */
 
-#ifdef CONFIG_PPC_STD_MMU_64
+#ifdef CONFIG_PPC_BOOK3S_64
 	batch = this_cpu_ptr(&ppc64_tlb_batch);
 	if (batch->active) {
 		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
@@ -1163,7 +1184,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 		__flush_tlb_pending(batch);
 		batch->active = 0;
 	}
-#endif /* CONFIG_PPC_STD_MMU_64 */
+#endif /* CONFIG_PPC_BOOK3S_64 */
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
 	switch_booke_debug_regs(&new->thread.debug);
@@ -1209,7 +1230,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 
 	last = _switch(old_thread, new_thread);
 
-#ifdef CONFIG_PPC_STD_MMU_64
+#ifdef CONFIG_PPC_BOOK3S_64
 	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
 		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
 		batch = this_cpu_ptr(&ppc64_tlb_batch);
@@ -1223,22 +1244,22 @@ struct task_struct *__switch_to(struct task_struct *prev,
 		 * The copy-paste buffer can only store into foreign real
 		 * addresses, so unprivileged processes can not see the
 		 * data or use it in any way unless they have foreign real
-		 * mappings. We don't have a VAS driver that allocates those
-		 * yet, so no cpabort is required.
+		 * mappings. If the new process has the foreign real address
+		 * mappings, we must issue a cp_abort to clear any state and
+		 * prevent snooping, corruption or a covert channel.
+		 *
+		 * DD1 allows paste into normal system memory so we do an
+		 * unpaired copy, rather than cp_abort, to clear the buffer,
+		 * since cp_abort is quite expensive.
 		 */
-		if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
-			/*
-			 * DD1 allows paste into normal system memory, so we
-			 * do an unpaired copy here to clear the buffer and
-			 * prevent a covert channel being set up.
-			 *
-			 * cpabort is not used because it is quite expensive.
-			 */
+		if (current_thread_info()->task->thread.used_vas) {
+			asm volatile(PPC_CP_ABORT);
+		} else if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
 			asm volatile(PPC_COPY(%0, %1)
 				     : : "r"(dummy_copy_buffer), "r"(0));
 		}
 	}
-#endif /* CONFIG_PPC_STD_MMU_64 */
+#endif /* CONFIG_PPC_BOOK3S_64 */
 
 	return last;
 }
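(The used_vas flag tested above is set by set_thread_uses_vas(), added later in this patch. A driver that maps a foreign real (paste) address into a process would be expected to call it when the mapping is established, along these lines; the caller and its error path are hypothetical, for illustration only:)

	/* Hypothetical driver path, after establishing the paste mapping: */
	rc = set_thread_uses_vas();
	if (rc)
		goto out_unmap;	/* illustrative error handling */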
@@ -1434,6 +1455,137 @@ void flush_thread(void)
 #endif /* CONFIG_HAVE_HW_BREAKPOINT */
 }
 
+int set_thread_uses_vas(void)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (!cpu_has_feature(CPU_FTR_ARCH_300))
+		return -EINVAL;
+
+	current->thread.used_vas = 1;
+
+	/*
+	 * Even a process that has no foreign real address mapping can use
+	 * an unpaired COPY instruction (to no real effect). Issue CP_ABORT
+	 * to clear any pending COPY and prevent a covert channel.
+	 *
+	 * __switch_to() will issue CP_ABORT on future context switches.
+	 */
+	asm volatile(PPC_CP_ABORT);
+
+#endif /* CONFIG_PPC_BOOK3S_64 */
+	return 0;
+}
+
+#ifdef CONFIG_PPC64
+static DEFINE_SPINLOCK(vas_thread_id_lock);
+static DEFINE_IDA(vas_thread_ida);
+
+/*
+ * We need to assign a unique thread id to each thread in a process.
+ *
+ * This thread id, referred to as TIDR and separate from Linux's tgid,
+ * is intended to be used to direct an ASB_Notify from the hardware to
+ * the thread, when a suitable event occurs in the system.
+ *
+ * One such event is a "paste" instruction in the context of Fast Thread
+ * Wakeup (aka Core-to-core wake up) in the Virtual Accelerator
+ * Switchboard (VAS) in POWER9.
+ *
+ * To get a unique TIDR per process we could simply reuse task_pid_nr()
+ * but the problem is that task_pid_nr() is not yet available when
+ * copy_thread() is called. Fixing that would require more intrusive
+ * changes to arch-neutral code in the copy_process() path.
+ *
+ * Further, to assign TIDRs that are unique within each process, we would
+ * need an atomic field (or an IDR) in task_struct, which again intrudes
+ * into the arch-neutral code. So try to assign globally unique TIDRs for now.
+ *
+ * NOTE: TIDR 0 indicates that the thread does not need a TIDR value.
+ *       For now, only threads that expect to be notified by the VAS
+ *       hardware need a TIDR value and we assign values > 0 for those.
+ */
+#define MAX_THREAD_CONTEXT	((1 << 16) - 1)
+static int assign_thread_tidr(void)
+{
+	int index;
+	int err;
+
+again:
+	if (!ida_pre_get(&vas_thread_ida, GFP_KERNEL))
+		return -ENOMEM;
+
+	spin_lock(&vas_thread_id_lock);
+	err = ida_get_new_above(&vas_thread_ida, 1, &index);
+	spin_unlock(&vas_thread_id_lock);
+
+	if (err == -EAGAIN)
+		goto again;
+	else if (err)
+		return err;
+
+	if (index > MAX_THREAD_CONTEXT) {
+		spin_lock(&vas_thread_id_lock);
+		ida_remove(&vas_thread_ida, index);
+		spin_unlock(&vas_thread_id_lock);
+		return -ENOMEM;
+	}
+
+	return index;
+}
+
+static void free_thread_tidr(int id)
+{
+	spin_lock(&vas_thread_id_lock);
+	ida_remove(&vas_thread_ida, id);
+	spin_unlock(&vas_thread_id_lock);
+}
+
+/*
+ * Clear any TIDR value assigned to this thread.
+ */
+void clear_thread_tidr(struct task_struct *t)
+{
+	if (!t->thread.tidr)
+		return;
+
+	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+		WARN_ON_ONCE(1);
+		return;
+	}
+
+	mtspr(SPRN_TIDR, 0);
+	free_thread_tidr(t->thread.tidr);
+	t->thread.tidr = 0;
+}
+
+void arch_release_task_struct(struct task_struct *t)
+{
+	clear_thread_tidr(t);
+}
+
+/*
+ * Assign a unique TIDR (thread id) for task @t and set it in the thread
+ * structure. For now, we only support setting TIDR for 'current' task.
+ */
+int set_thread_tidr(struct task_struct *t)
+{
+	if (!cpu_has_feature(CPU_FTR_ARCH_300))
+		return -EINVAL;
+
+	if (t != current)
+		return -EINVAL;
+
+	t->thread.tidr = assign_thread_tidr();
+	if (t->thread.tidr < 0)
+		return t->thread.tidr;
+
+	mtspr(SPRN_TIDR, t->thread.tidr);
+
+	return 0;
+}
+
+#endif /* CONFIG_PPC64 */
+
 void
 release_thread(struct task_struct *t)
 {
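(For illustration, a consumer of the TIDR interface, for example a VAS fast-thread-wakeup setup path, would call set_thread_tidr() on the current task; the snippet below is a sketch, not code from this patch:)

	/* TIDR can only be assigned to the calling thread; 0 means
	 * "no TIDR assigned", and clear_thread_tidr() releases it. */
	int rc = set_thread_tidr(current);
	if (rc)
		return rc;	/* e.g. -EINVAL without CPU_FTR_ARCH_300 */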
@@ -1467,7 +1619,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 
 static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
 {
-#ifdef CONFIG_PPC_STD_MMU_64
+#ifdef CONFIG_PPC_BOOK3S_64
 	unsigned long sp_vsid;
 	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
 
@@ -1580,6 +1732,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	}
 	if (cpu_has_feature(CPU_FTR_HAS_PPR))
 		p->thread.ppr = INIT_PPR;
+
+	p->thread.tidr = 0;
 #endif
 	kregs->nip = ppc_function_entry(f);
 	return 0;
@@ -1898,7 +2052,8 @@ unsigned long get_wchan(struct task_struct *p)
 
 	do {
 		sp = *(unsigned long *)sp;
-		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
+		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD) ||
+		    p->state == TASK_RUNNING)
 			return 0;
 		if (count > 0) {
 			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
@@ -2046,7 +2201,7 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 	unsigned long base = mm->brk;
 	unsigned long ret;
 
-#ifdef CONFIG_PPC_STD_MMU_64
+#ifdef CONFIG_PPC_BOOK3S_64
 	/*
 	 * If we are using 1TB segments and we are allowed to randomise
 	 * the heap, we can put it above 1TB so it is backed by a 1TB