```text
commit a49072bb367a94a9af6c6a6dcbaa1dc5617d7fa0
tree   155159e2ace7175298465b6fbf1024c77de8edce
parent fc494d6c1825de37f04abe147741d50be08403ab
parent 31a6b11fed6ceec07ec4bdfefae56b8252d450cf
author    Tony Luck <tony.luck@intel.com>  2008-04-17 13:13:09 -0400
committer Tony Luck <tony.luck@intel.com>  2008-04-17 13:13:09 -0400

    Pull kvm-patches into release branch

 arch/ia64/kernel/mca.c     |  49
 arch/ia64/kernel/mca_asm.S |   5
 arch/ia64/kernel/smp.c     |  82
 arch/ia64/mm/tlb.c         | 196
 include/asm-ia64/kregs.h   |   3
 include/asm-ia64/smp.h     |   3
 include/asm-ia64/tlb.h     |  26
 7 files changed, 364 insertions(+), 0 deletions(-)
```
```diff
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6c18221dba36..607006a6a976 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -97,6 +97,7 @@
 
 #include <asm/irq.h>
 #include <asm/hw_irq.h>
+#include <asm/tlb.h>
 
 #include "mca_drv.h"
 #include "entry.h"
@@ -112,6 +113,7 @@ DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
 DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
 DEFINE_PER_CPU(u64, ia64_mca_pal_pte);	    /* PTE to map PAL code */
 DEFINE_PER_CPU(u64, ia64_mca_pal_base);    /* vaddr PAL code granule */
+DEFINE_PER_CPU(u64, ia64_mca_tr_reload);   /* Flag for TR reload */
 
 unsigned long __per_cpu_mca[NR_CPUS];
 
@@ -1182,6 +1184,49 @@ all_in:
	return;
 }
 
+/* mca_insert_tr
+ *
+ * Re-insert this CPU's recorded dynamic TR entries, switching the
+ * region register first whenever the recorded rid differs.
+ * iord: 0x1 = itr, 0x2 = dtr
+ */
+static void mca_insert_tr(u64 iord)
+{
+
+	int i;
+	u64 old_rr;
+	struct ia64_tr_entry *p;
+	unsigned long psr;
+	int cpu = smp_processor_id();
+
+	psr = ia64_clear_ic();
+	for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
+		p = &__per_cpu_idtrs[cpu][iord-1][i];
+		if (p->pte & 0x1) {
+			old_rr = ia64_get_rr(p->ifa);
+			if (old_rr != p->rr) {
+				ia64_set_rr(p->ifa, p->rr);
+				ia64_srlz_d();
+			}
+			ia64_ptr(iord, p->ifa, p->itir >> 2);
+			ia64_srlz_i();
+			if (iord & 0x1) {
+				ia64_itr(0x1, i, p->ifa, p->pte, p->itir >> 2);
+				ia64_srlz_i();
+			}
+			if (iord & 0x2) {
+				ia64_itr(0x2, i, p->ifa, p->pte, p->itir >> 2);
+				ia64_srlz_i();
+			}
+			if (old_rr != p->rr) {
+				ia64_set_rr(p->ifa, old_rr);
+				ia64_srlz_d();
+			}
+		}
+	}
+	ia64_set_psr(psr);
+}
+
 /*
  * ia64_mca_handler
  *
@@ -1271,6 +1316,10 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
		monarch_cpu = -1;
 #endif
	}
+	if (__get_cpu_var(ia64_mca_tr_reload)) {
+		mca_insert_tr(0x1); /* Reload dynamic itrs */
+		mca_insert_tr(0x2); /* Reload dynamic dtrs */
+	}
	if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, (long)&nd, 0, recover)
			== NOTIFY_STOP)
		ia64_mca_spin(__func__);
```
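Note on the hunks above: on the way out of the MCA, ia64_mca_handler() now checks the per-CPU ia64_mca_tr_reload flag and re-inserts the dynamically allocated TRs on both the instruction and data sides. mca_insert_tr() walks the per-CPU shadow array __per_cpu_idtrs[] and, for each valid entry, temporarily installs the recorded region register value when it differs from the current one, purges and re-inserts the translation, then restores the original rr; the ia64_clear_ic()/ia64_set_psr() pair keeps PSR.ic off so no TLB miss can be taken mid-update.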
```diff
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 8bc7d259e0c6..a06d46548ff9 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -219,8 +219,13 @@ ia64_reload_tr:
	mov r20=IA64_TR_CURRENT_STACK
	;;
	itr.d dtr[r20]=r16
+	GET_THIS_PADDR(r2, ia64_mca_tr_reload)
+	mov r18=1
	;;
	srlz.d
+	;;
+	st8 [r2]=r18
+	;;
 
 done_tlb_purge_and_reload:
 
```
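The MCA rendezvous code runs in physical mode and only re-inserts the kernel's fixed translations (IA64_TR_CURRENT_STACK above); the added instructions store 1 through the physical address of the per-CPU ia64_mca_tr_reload flag (via GET_THIS_PADDR), which is what ia64_mca_handler() tests before calling mca_insert_tr() once it is back in virtual mode.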
```diff
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 4e446aa5f4ac..9a9d4c489330 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -213,6 +213,19 @@ send_IPI_allbutself (int op)
  * Called with preemption disabled.
  */
 static inline void
+send_IPI_mask(cpumask_t mask, int op)
+{
+	unsigned int cpu;
+
+	for_each_cpu_mask(cpu, mask) {
+		send_IPI_single(cpu, op);
+	}
+}
+
+/*
+ * Called with preemption disabled.
+ */
+static inline void
 send_IPI_all (int op)
 {
	int i;
@@ -401,6 +414,75 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * <mask>	The set of cpus to run on.  Must not include the current cpu.
+ * <func>	The function to run. This must be fast and non-blocking.
+ * <info>	An arbitrary pointer to pass to the function.
+ * <wait>	If true, wait (atomically) until function
+ *		has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function_mask(cpumask_t mask,
+			   void (*func)(void *), void *info,
+			   int wait)
+{
+	struct call_data_struct data;
+	cpumask_t allbutself;
+	int cpus;
+
+	spin_lock(&call_lock);
+	allbutself = cpu_online_map;
+	cpu_clear(smp_processor_id(), allbutself);
+
+	cpus_and(mask, mask, allbutself);
+	cpus = cpus_weight(mask);
+	if (!cpus) {
+		spin_unlock(&call_lock);
+		return 0;
+	}
+
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
+	data.func = func;
+	data.info = info;
+	atomic_set(&data.started, 0);
+	data.wait = wait;
+	if (wait)
+		atomic_set(&data.finished, 0);
+
+	call_data = &data;
+	mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC */
+
+	/* Send a message to other CPUs */
+	if (cpus_equal(mask, allbutself))
+		send_IPI_allbutself(IPI_CALL_FUNC);
+	else
+		send_IPI_mask(mask, IPI_CALL_FUNC);
+
+	/* Wait for response */
+	while (atomic_read(&data.started) != cpus)
+		cpu_relax();
+
+	if (wait)
+		while (atomic_read(&data.finished) != cpus)
+			cpu_relax();
+	call_data = NULL;
+
+	spin_unlock(&call_lock);
+	return 0;
+
+}
+EXPORT_SYMBOL(smp_call_function_mask);
+
 /*
  * this function sends a 'generic call function' IPI to all other CPUs
  * in the system.
```
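For context, here is a sketch of how a caller might use the new interface; the callback and wrapper below are hypothetical, not part of this patch. Per the comment block, the caller must not have interrupts disabled, and the callback runs in interrupt context on each target CPU:

```c
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <asm/atomic.h>

/* Hypothetical callback: runs on every target CPU in interrupt
 * context, so it must be fast and must not block. */
static void drain_local_counters(void *info)
{
	atomic_long_t *total = info;

	/* ... fold this CPU's private counters into *total ... */
}

static int drain_counters_on(cpumask_t mask, atomic_long_t *total)
{
	/* smp_call_function_mask() strips the current CPU from the mask
	 * itself and, with wait=1, returns only after every target CPU
	 * has finished running the callback. */
	return smp_call_function_mask(mask, drain_local_counters, total, 1);
}
```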
```diff
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 655da240d13c..3d8903f936a5 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -26,6 +26,8 @@
 #include <asm/pal.h>
 #include <asm/tlbflush.h>
 #include <asm/dma.h>
+#include <asm/processor.h>
+#include <asm/tlb.h>
 
 static struct {
	unsigned long mask;	/* mask of supported purge page-sizes */
@@ -39,6 +41,10 @@ struct ia64_ctx ia64_ctx = {
 };
 
 DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
+DEFINE_PER_CPU(u8, ia64_tr_num);  /* Number of TR slots in current processor */
+DEFINE_PER_CPU(u8, ia64_tr_used); /* Max slot number used by kernel */
+
+struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
 
 /*
  * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
@@ -190,6 +196,9 @@ ia64_tlb_init (void)
	ia64_ptce_info_t uninitialized_var(ptce_info); /* GCC be quiet */
	unsigned long tr_pgbits;
	long status;
+	pal_vm_info_1_u_t vm_info_1;
+	pal_vm_info_2_u_t vm_info_2;
+	int cpu = smp_processor_id();
 
	if ((status = ia64_pal_vm_page_size(&tr_pgbits, &purge.mask)) != 0) {
		printk(KERN_ERR "PAL_VM_PAGE_SIZE failed with status=%ld; "
@@ -206,4 +215,191 @@ ia64_tlb_init (void)
	local_cpu_data->ptce_stride[1] = ptce_info.stride[1];
 
	local_flush_tlb_all();	/* nuke left overs from bootstrapping... */
+	status = ia64_pal_vm_summary(&vm_info_1, &vm_info_2);
+
+	if (status) {
+		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);
+		per_cpu(ia64_tr_num, cpu) = 8;
+		return;
+	}
+	per_cpu(ia64_tr_num, cpu) = vm_info_1.pal_vm_info_1_s.max_itr_entry+1;
+	if (per_cpu(ia64_tr_num, cpu) >
+				(vm_info_1.pal_vm_info_1_s.max_dtr_entry+1))
+		per_cpu(ia64_tr_num, cpu) =
+				vm_info_1.pal_vm_info_1_s.max_dtr_entry+1;
+	if (per_cpu(ia64_tr_num, cpu) > IA64_TR_ALLOC_MAX) {
+		per_cpu(ia64_tr_num, cpu) = IA64_TR_ALLOC_MAX;
+		printk(KERN_DEBUG "TR register number exceeds IA64_TR_ALLOC_MAX; "
+			"IA64_TR_ALLOC_MAX should be extended\n");
+	}
+}
+
+/*
+ * is_tr_overlap
+ *
+ * Check overlap with inserted TRs.
+ */
+static int is_tr_overlap(struct ia64_tr_entry *p, u64 va, u64 log_size)
+{
+	u64 tr_log_size;
+	u64 tr_end;
+	u64 va_rr = ia64_get_rr(va);
+	u64 va_rid = RR_TO_RID(va_rr);
+	u64 va_end = va + (1 << log_size) - 1;
+
+	if (va_rid != RR_TO_RID(p->rr))
+		return 0;
+	tr_log_size = (p->itir & 0xff) >> 2;
+	tr_end = p->ifa + (1 << tr_log_size) - 1;
+
+	if (va > tr_end || p->ifa > va_end)
+		return 0;
+	return 1;
+
+}
+
+/*
+ * ia64_itr_entry: allocate a TR slot and insert a translation (virtual mode).
+ *
+ * target_mask : 0x1 : itr, 0x2 : dtr, 0x3 : idtr
+ *
+ * va       : virtual address.
+ * pte      : pte entry to be inserted.
+ * log_size : log2 of the size of the range to be covered.
+ *
+ * Return value: < 0  : error number.
+ *               >= 0 : slot number allocated for TR.
+ *
+ * Must be called with preemption disabled.
+ */
+int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
+{
+	int i, r;
+	unsigned long psr;
+	struct ia64_tr_entry *p;
+	int cpu = smp_processor_id();
+
+	r = -EINVAL;
+	/* Check overlap with existing TR entries */
+	if (target_mask & 0x1) {
+		p = &__per_cpu_idtrs[cpu][0][0];
+		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
+								i++, p++) {
+			if (p->pte & 0x1)
+				if (is_tr_overlap(p, va, log_size)) {
+					printk(KERN_DEBUG "Overlapped entry "
+						"inserted for TR register!\n");
+					goto out;
+				}
+		}
+	}
+	if (target_mask & 0x2) {
+		p = &__per_cpu_idtrs[cpu][1][0];
+		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
+								i++, p++) {
+			if (p->pte & 0x1)
+				if (is_tr_overlap(p, va, log_size)) {
+					printk(KERN_DEBUG "Overlapped entry "
+						"inserted for TR register!\n");
+					goto out;
+				}
+		}
+	}
+
+	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
+		switch (target_mask & 0x3) {
+		case 1:
+			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1))
+				goto found;
+			continue;
+		case 2:
+			if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+				goto found;
+			continue;
+		case 3:
+			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) &&
+			    !(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+				goto found;
+			continue;
+		default:
+			r = -EINVAL;
+			goto out;
+		}
+	}
+found:
+	if (i >= per_cpu(ia64_tr_num, cpu))
+		return -EBUSY;
+
+	/* Record TR info for MCA handler use */
+	if (i > per_cpu(ia64_tr_used, cpu))
+		per_cpu(ia64_tr_used, cpu) = i;
+
+	psr = ia64_clear_ic();
+	if (target_mask & 0x1) {
+		ia64_itr(0x1, i, va, pte, log_size);
+		ia64_srlz_i();
+		p = &__per_cpu_idtrs[cpu][0][i];
+		p->ifa = va;
+		p->pte = pte;
+		p->itir = log_size << 2;
+		p->rr = ia64_get_rr(va);
+	}
+	if (target_mask & 0x2) {
+		ia64_itr(0x2, i, va, pte, log_size);
+		ia64_srlz_i();
+		p = &__per_cpu_idtrs[cpu][1][i];
+		p->ifa = va;
+		p->pte = pte;
+		p->itir = log_size << 2;
+		p->rr = ia64_get_rr(va);
+	}
+	ia64_set_psr(psr);
+	r = i;
+out:
+	return r;
+}
+EXPORT_SYMBOL_GPL(ia64_itr_entry);
+
+/*
+ * ia64_ptr_entry: purge a TR entry and free its slot.
+ *
+ * target_mask: 0x1: purge itr, 0x2: purge dtr, 0x3: purge idtr.
+ * slot: slot number to be freed.
+ *
+ * Must be called with preemption disabled.
+ */
+void ia64_ptr_entry(u64 target_mask, int slot)
+{
+	int cpu = smp_processor_id();
+	int i;
+	struct ia64_tr_entry *p;
+
+	if (slot < IA64_TR_ALLOC_BASE || slot >= per_cpu(ia64_tr_num, cpu))
+		return;
+
+	if (target_mask & 0x1) {
+		p = &__per_cpu_idtrs[cpu][0][slot];
+		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir >> 2)) {
+			p->pte = 0;
+			ia64_ptr(0x1, p->ifa, p->itir >> 2);
+			ia64_srlz_i();
+		}
+	}
+
+	if (target_mask & 0x2) {
+		p = &__per_cpu_idtrs[cpu][1][slot];
+		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir >> 2)) {
+			p->pte = 0;
+			ia64_ptr(0x2, p->ifa, p->itir >> 2);
+			ia64_srlz_i();
+		}
+	}
+
+	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
+		if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) ||
+		    (__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+			break;
+	}
+	per_cpu(ia64_tr_used, cpu) = i;
 }
+EXPORT_SYMBOL_GPL(ia64_ptr_entry);
```
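A sketch of the intended usage of the pair exported above (the wrapper names and the notion of pinning a VMM area are illustrative; KVM/ia64 is the consumer this series was merged for). Both calls require preemption disabled because the slot bookkeeping is per-CPU:

```c
#include <asm/kregs.h>
#include <asm/tlb.h>

/* Hypothetical: pin a mapping into a matched itr/dtr pair. */
static int pin_vmm_area(u64 va, u64 pte, u64 log_size)
{
	int slot;

	preempt_disable();
	/* target_mask 0x3 = insert into both itr and dtr */
	slot = ia64_itr_entry(0x3, va, pte, log_size);
	preempt_enable();

	return slot;	/* >= 0: slot number; -EBUSY/-EINVAL on failure */
}

static void unpin_vmm_area(int slot)
{
	preempt_disable();
	ia64_ptr_entry(0x3, slot);	/* clears the slot and purges the TR */
	preempt_enable();
}
```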
```diff
diff --git a/include/asm-ia64/kregs.h b/include/asm-ia64/kregs.h
index 7e55a584975c..aefcdfee7f23 100644
--- a/include/asm-ia64/kregs.h
+++ b/include/asm-ia64/kregs.h
@@ -31,6 +31,9 @@
 #define IA64_TR_PALCODE		1	/* itr1: maps PALcode as required by EFI */
 #define IA64_TR_CURRENT_STACK	1	/* dtr1: maps kernel's memory- & register-stacks */
 
+#define IA64_TR_ALLOC_BASE	2	/* itr & dtr: base of dynamic TR resource */
+#define IA64_TR_ALLOC_MAX	32	/* max number for dynamic use */
+
 /* Processor status register bits: */
 #define IA64_PSR_BE_BIT		1
 #define IA64_PSR_UP_BIT		2
```
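Slots 0 and 1 are already taken by the fixed kernel, PALcode, and current-stack mappings above, so dynamic allocation starts at IA64_TR_ALLOC_BASE = 2. IA64_TR_ALLOC_MAX only sizes the per-CPU bookkeeping array; ia64_tlb_init() clamps the PAL-reported TR count to it at boot and prints a warning when the hardware offers more slots than the table can track.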
```diff
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 4fa733dd417a..ec5f355fb7e3 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -38,6 +38,9 @@ ia64_get_lid (void)
	return lid.f.id << 8 | lid.f.eid;
 }
 
+extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
+				  void *info, int wait);
+
 #define hard_smp_processor_id()	ia64_get_lid()
 
 #ifdef CONFIG_SMP
```
```diff
diff --git a/include/asm-ia64/tlb.h b/include/asm-ia64/tlb.h
index 26edcb750f9f..20d8a39680c2 100644
--- a/include/asm-ia64/tlb.h
+++ b/include/asm-ia64/tlb.h
@@ -64,6 +64,32 @@ struct mmu_gather {
	struct page	*pages[FREE_PTE_NR];
 };
 
+struct ia64_tr_entry {
+	u64 ifa;
+	u64 itir;
+	u64 pte;
+	u64 rr;
+}; /* record of an inserted TR entry */
+
+extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
+extern void ia64_ptr_entry(u64 target_mask, int slot);
+
+extern struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+
+/*
+ * Region register macros
+ */
+#define RR_TO_VE(val)	(((val) >> 0) & 0x0000000000000001)
+#define RR_VE(val)	(((val) & 0x0000000000000001) << 0)
+#define RR_VE_MASK	0x0000000000000001L
+#define RR_VE_SHIFT	0
+#define RR_TO_PS(val)	(((val) >> 2) & 0x000000000000003f)
+#define RR_PS(val)	(((val) & 0x000000000000003f) << 2)
+#define RR_PS_MASK	0x00000000000000fcL
+#define RR_PS_SHIFT	2
+#define RR_RID_MASK	0x00000000ffffff00L
+#define RR_TO_RID(val)	(((val) >> 8) & 0xffffff)
+
 /* Users of the generic TLB shootdown code must declare this storage space. */
 DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
 
```
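As a worked example of the new region-register macros (variable names illustrative):

```c
u64 rr  = ia64_get_rr(va);	/* region register covering va */
u64 rid = RR_TO_RID(rr);	/* region id: bits 8..31 */
u64 ps  = RR_TO_PS(rr);		/* preferred page size (log2): bits 2..7 */
u64 ve  = RR_TO_VE(rr);		/* bit 0: VHPT walker enable for the region */

/* is_tr_overlap() in tlb.c uses RR_TO_RID() exactly this way: two ranges
 * can only conflict in a TR when their region ids match. */
```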
