 arch/powerpc/include/asm/mmu-book3e.h |  8 ++--
 arch/powerpc/include/asm/page.h       |  2 +-
 arch/powerpc/kernel/cpu_setup_6xx.S   | 40 ++++++++--------
 arch/powerpc/kernel/cputable.c        |  4 ++--
 arch/powerpc/mm/numa.c                | 55 +++++++++++---------
 arch/powerpc/platforms/pseries/lpar.c | 37 ++++++++++++++++
 6 files changed, 94 insertions(+), 52 deletions(-)
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 8eaed81ea642..17194fcd4040 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,8 +40,8 @@
 
 /* MAS registers bit definitions */
 
-#define MAS0_TLBSEL(x)	((x << 28) & 0x30000000)
-#define MAS0_ESEL(x)	((x << 16) & 0x0FFF0000)
+#define MAS0_TLBSEL(x)	(((x) << 28) & 0x30000000)
+#define MAS0_ESEL(x)	(((x) << 16) & 0x0FFF0000)
 #define MAS0_NV(x)	((x) & 0x00000FFF)
 #define MAS0_HES	0x00004000
 #define MAS0_WQ_ALLWAYS	0x00000000
@@ -50,12 +50,12 @@
 
 #define MAS1_VALID	0x80000000
 #define MAS1_IPROT	0x40000000
-#define MAS1_TID(x)	((x << 16) & 0x3FFF0000)
+#define MAS1_TID(x)	(((x) << 16) & 0x3FFF0000)
 #define MAS1_IND	0x00002000
 #define MAS1_TS		0x00001000
 #define MAS1_TSIZE_MASK	0x00000f80
 #define MAS1_TSIZE_SHIFT	7
-#define MAS1_TSIZE(x)	((x << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
+#define MAS1_TSIZE(x)	(((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
 
 #define MAS2_EPN	0xFFFFF000
 #define MAS2_X0		0x00000040
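
The mmu-book3e.h hunks are standard macro hygiene: the argument is substituted textually, so an expression argument can bind to the shift operator in surprising ways. A minimal user-space sketch of the failure mode (the _OLD/_NEW names and the values are ours, chosen purely for illustration):

    #include <stdio.h>
    #include <stdint.h>

    /* Old form: the argument is substituted unparenthesized. */
    #define MAS1_TID_OLD(x) ((x << 16) & 0x3FFF0000)
    /* New form: the argument is parenthesized before shifting. */
    #define MAS1_TID_NEW(x) (((x) << 16) & 0x3FFF0000)

    int main(void)
    {
        uint32_t pid = 0x123;

        /* MAS1_TID_OLD(pid & 0xff) expands to ((pid & 0xff << 16) & ...).
         * Since << binds tighter than &, that is pid & (0xff << 16):
         * the low byte never reaches the TID field at all. */
        printf("old: 0x%08x\n", MAS1_TID_OLD(pid & 0xff)); /* 0x00000000 */
        printf("new: 0x%08x\n", MAS1_TID_NEW(pid & 0xff)); /* 0x00230000 */
        return 0;
    }
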
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 53b64be40eb2..da4b20008541 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -101,7 +101,7 @@ extern phys_addr_t kernstart_addr;
 
 #ifdef CONFIG_FLATMEM
 #define ARCH_PFN_OFFSET		(MEMORY_START >> PAGE_SHIFT)
-#define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr))
+#define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
 #endif
 
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
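
This change only makes sense if max_mapnr holds an absolute limit (one past the highest valid pfn) rather than a count relative to MEMORY_START; on that assumption, the old upper bound over-reached by ARCH_PFN_OFFSET pages whenever memory starts above address zero. A worked example with invented numbers:

    #include <stdio.h>

    /* Hypothetical layout: 256MB of RAM starting at the 1GB mark, 4K
     * pages. All numbers here are made up for illustration. */
    #define PAGE_SHIFT      12
    #define MEMORY_START    0x40000000UL
    #define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT)    /* 0x40000 */
    #define MAX_MAPNR       0x50000UL   /* one past the last valid pfn */

    /* Old check: adds the offset to a limit that already includes it. */
    #define pfn_valid_old(pfn) \
        ((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + MAX_MAPNR))
    /* New check: compares against the absolute limit directly. */
    #define pfn_valid_new(pfn) \
        ((pfn) >= ARCH_PFN_OFFSET && (pfn) < MAX_MAPNR)

    int main(void)
    {
        unsigned long pfn = 0x60000;    /* well past the end of RAM */

        printf("old: %d\n", pfn_valid_old(pfn));    /* 1: wrongly valid */
        printf("new: %d\n", pfn_valid_new(pfn));    /* 0 */
        return 0;
    }
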
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index 55cba4a8a959..f8cd9fba4d35 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -18,7 +18,7 @@
 #include <asm/mmu.h>
 
 _GLOBAL(__setup_cpu_603)
-	mflr	r4
+	mflr	r5
 BEGIN_MMU_FTR_SECTION
 	li	r10,0
 	mtspr	SPRN_SPRG_603_LRU,r10	/* init SW LRU tracking */
@@ -27,60 +27,60 @@ BEGIN_FTR_SECTION
 	bl	__init_fpu_registers
 END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
 	bl	setup_common_caches
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_604)
-	mflr	r4
+	mflr	r5
 	bl	setup_common_caches
 	bl	setup_604_hid0
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_750)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_750cx)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
 	bl	setup_750cx
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_750fx)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
 	bl	setup_750fx
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_7400)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_7400_workarounds
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_7410)
-	mflr	r4
+	mflr	r5
 	bl	__init_fpu_registers
 	bl	setup_7410_workarounds
 	bl	setup_common_caches
 	bl	setup_750_7400_hid0
 	li	r3,0
 	mtspr	SPRN_L2CR2,r3
-	mtlr	r4
+	mtlr	r5
 	blr
 _GLOBAL(__setup_cpu_745x)
-	mflr	r4
+	mflr	r5
 	bl	setup_common_caches
 	bl	setup_745x_specifics
-	mtlr	r4
+	mtlr	r5
 	blr
 
 /* Enable caches for 603's, 604, 750 & 7400 */
@@ -194,10 +194,10 @@ setup_750cx:
 	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
 	cror	4*cr0+eq,4*cr0+eq,4*cr2+eq
 	bnelr
-	lwz	r6,CPU_SPEC_FEATURES(r5)
+	lwz	r6,CPU_SPEC_FEATURES(r4)
 	li	r7,CPU_FTR_CAN_NAP
 	andc	r6,r6,r7
-	stw	r6,CPU_SPEC_FEATURES(r5)
+	stw	r6,CPU_SPEC_FEATURES(r4)
 	blr
 
 /* 750fx specific
@@ -225,12 +225,12 @@ BEGIN_FTR_SECTION
 	andis.	r11,r11,L3CR_L3E@h
 	beq	1f
 END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
-	lwz	r6,CPU_SPEC_FEATURES(r5)
+	lwz	r6,CPU_SPEC_FEATURES(r4)
 	andi.	r0,r6,CPU_FTR_L3_DISABLE_NAP
 	beq	1f
 	li	r7,CPU_FTR_CAN_NAP
 	andc	r6,r6,r7
-	stw	r6,CPU_SPEC_FEATURES(r5)
+	stw	r6,CPU_SPEC_FEATURES(r4)
 1:
 	mfspr	r11,SPRN_HID0
 
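
Read together with the cputable.c hunk that follows: these setup routines appear to be invoked as ordinary C functions now, and under the PowerPC calling convention the first argument (the relocation offset) arrives in r3 while the second (the struct cpu_spec pointer) arrives in r4. The saved link register therefore has to be parked in r5 instead of r4, and the CPU_SPEC_FEATURES loads and stores index off r4, where the spec pointer now lives. The C-side shape of the hook, paraphrased rather than quoted from the cputable header:

    /* Sketch of the setup hook's prototype; on 32-bit PowerPC the
     * offset lands in r3 and the spec pointer in r4. */
    struct cpu_spec;
    typedef void (*cpu_setup_t)(unsigned long offset, struct cpu_spec *spec);
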
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 8d74a24c5502..e8e915ce3d8d 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2076,8 +2076,8 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s)
 	 * pointer on ppc64 and booke as we are running at 0 in real mode
 	 * on ppc64 and reloc_offset is always 0 on booke.
 	 */
-	if (s->cpu_setup) {
-		s->cpu_setup(offset, s);
-	}
+	if (t->cpu_setup) {
+		t->cpu_setup(offset, t);
+	}
 #endif /* CONFIG_PPC64 || CONFIG_BOOKE */
 }
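
The s/t swap matters because, judging from the function's role, setup_cpu_spec() copies the matched cputable entry (s) into the spec the kernel actually keeps (t). A setup routine may rewrite feature bits, as the CPU_FTR_CAN_NAP fixups in the 6xx assembly above do, and those writes have to land in the live copy rather than in the template it was copied from; this is our reading of the hunk, not something stated in the patch itself.
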
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index bf5cb91f07de..fd4812329570 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -186,7 +186,7 @@ static void unmap_cpu_from_node(unsigned long cpu)
 	dbg("removing cpu %lu from node %d\n", cpu, node);
 
 	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
-		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
+		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
 	} else {
 		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
 		       cpu, node);
@@ -1289,10 +1289,9 @@ u64 memory_hotplug_max(void)
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-/* Vrtual Processor Home Node (VPHN) support */
+/* Virtual Processor Home Node (VPHN) support */
 #ifdef CONFIG_PPC_SPLPAR
-#define VPHN_NR_CHANGE_CTRS (8)
-static u8 vphn_cpu_change_counts[NR_CPUS][VPHN_NR_CHANGE_CTRS];
+static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
 static cpumask_t cpu_associativity_changes_mask;
 static int vphn_enabled;
 static void set_topology_timer(void);
@@ -1303,16 +1302,18 @@ static void set_topology_timer(void);
  */
 static void setup_cpu_associativity_change_counters(void)
 {
-	int cpu = 0;
+	int cpu;
+
+	/* The VPHN feature supports a maximum of 8 reference points */
+	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);
 
 	for_each_possible_cpu(cpu) {
-		int i = 0;
+		int i;
 		u8 *counts = vphn_cpu_change_counts[cpu];
 		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
 
-		for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
+		for (i = 0; i < distance_ref_points_depth; i++)
 			counts[i] = hypervisor_counts[i];
-		}
 	}
 }
 
@@ -1329,7 +1330,7 @@ static void setup_cpu_associativity_change_counters(void)
  */
 static int update_cpu_associativity_changes_mask(void)
 {
-	int cpu = 0, nr_cpus = 0;
+	int cpu, nr_cpus = 0;
 	cpumask_t *changes = &cpu_associativity_changes_mask;
 
 	cpumask_clear(changes);
@@ -1339,8 +1340,8 @@ static int update_cpu_associativity_changes_mask(void)
 		u8 *counts = vphn_cpu_change_counts[cpu];
 		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
 
-		for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
-			if (hypervisor_counts[i] > counts[i]) {
+		for (i = 0; i < distance_ref_points_depth; i++) {
+			if (hypervisor_counts[i] != counts[i]) {
 				counts[i] = hypervisor_counts[i];
 				changed = 1;
 			}
@@ -1354,8 +1355,11 @@ static int update_cpu_associativity_changes_mask(void)
 	return nr_cpus;
 }
 
-/* 6 64-bit registers unpacked into 12 32-bit associativity values */
-#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32))
+/*
+ * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
+ * the complete property we have to add the length in the first cell.
+ */
+#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
 
 /*
  * Convert the associativity domain numbers returned from the hypervisor
@@ -1363,15 +1367,14 @@ static int update_cpu_associativity_changes_mask(void)
  */
 static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
 {
-	int i = 0;
-	int nr_assoc_doms = 0;
+	int i, nr_assoc_doms = 0;
 	const u16 *field = (const u16*) packed;
 
 #define VPHN_FIELD_UNUSED	(0xffff)
 #define VPHN_FIELD_MSB		(0x8000)
 #define VPHN_FIELD_MASK		(~VPHN_FIELD_MSB)
 
-	for (i = 0; i < VPHN_ASSOC_BUFSIZE; i++) {
+	for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
 		if (*field == VPHN_FIELD_UNUSED) {
 			/* All significant fields processed, and remaining
 			 * fields contain the reserved value of all 1's.
@@ -1379,14 +1382,12 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
 			 */
 			unpacked[i] = *((u32*)field);
 			field += 2;
-		}
-		else if (*field & VPHN_FIELD_MSB) {
+		} else if (*field & VPHN_FIELD_MSB) {
 			/* Data is in the lower 15 bits of this field */
 			unpacked[i] = *field & VPHN_FIELD_MASK;
 			field++;
 			nr_assoc_doms++;
-		}
-		else {
+		} else {
 			/* Data is in the lower 15 bits of this field
 			 * concatenated with the next 16 bit field
 			 */
@@ -1396,6 +1397,9 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
 		}
 	}
 
+	/* The first cell contains the length of the property */
+	unpacked[0] = nr_assoc_doms;
+
 	return nr_assoc_doms;
 }
 
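
For orientation, here is a user-space model of the decoder these hunks build up. It is a sketch, not the kernel function: the full-32-bit branch is reconstructed from the comment (the diff elides its body), and the reserved all-ones tail simply ends the loop here instead of being copied through as in the kernel code.

    #include <stdio.h>
    #include <stdint.h>

    #define VPHN_FIELD_UNUSED 0xffff
    #define VPHN_FIELD_MSB    0x8000
    #define VPHN_FIELD_MASK   0x7fff

    /* Each 16-bit field of the packed reply is either 0xffff (unused),
     * a 15-bit domain number flagged by the MSB, or the first half of a
     * full 32-bit value. Cell 0 of the output holds the domain count,
     * like the length cell of an ibm,associativity property. */
    static int unpack(const uint16_t *field, uint32_t *unpacked, int cells)
    {
        int i, nr_assoc_doms = 0;

        for (i = 1; i < cells; i++) {
            if (*field == VPHN_FIELD_UNUSED) {
                break;  /* reserved all-ones tail: nothing left */
            } else if (*field & VPHN_FIELD_MSB) {
                unpacked[i] = *field & VPHN_FIELD_MASK;
                field++;
                nr_assoc_doms++;
            } else {
                unpacked[i] = ((uint32_t)(field[0] & VPHN_FIELD_MASK) << 16)
                              | field[1];
                field += 2;
                nr_assoc_doms++;
            }
        }
        unpacked[0] = nr_assoc_doms;    /* length cell, as in the patch */
        return nr_assoc_doms;
    }

    int main(void)
    {
        /* Hypothetical reply: two 15-bit domains, then unused fields. */
        uint16_t packed[4] = { 0x8001, 0x8005, 0xffff, 0xffff };
        uint32_t out[13] = { 0 };
        int n = unpack(packed, out, 13);

        printf("%d domains: %u %u\n", n, out[1], out[2]); /* 2: 1 5 */
        return 0;
    }
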
@@ -1405,7 +1409,7 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
  */
 static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
 {
-	long rc = 0;
+	long rc;
 	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
 	u64 flags = 1;
 	int hwcpu = get_hard_smp_processor_id(cpu);
@@ -1419,7 +1423,7 @@ static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
 static long vphn_get_associativity(unsigned long cpu,
 					unsigned int *associativity)
 {
-	long rc = 0;
+	long rc;
 
 	rc = hcall_vphn(cpu, associativity);
 
@@ -1445,9 +1449,9 @@ static long vphn_get_associativity(unsigned long cpu,
  */
 int arch_update_cpu_topology(void)
 {
-	int cpu = 0, nid = 0, old_nid = 0;
+	int cpu, nid, old_nid;
 	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
-	struct sys_device *sysdev = NULL;
+	struct sys_device *sysdev;
 
 	for_each_cpu_mask(cpu, cpu_associativity_changes_mask) {
 		vphn_get_associativity(cpu, associativity);
@@ -1512,7 +1516,8 @@ int start_topology_update(void)
 {
 	int rc = 0;
 
-	if (firmware_has_feature(FW_FEATURE_VPHN)) {
+	if (firmware_has_feature(FW_FEATURE_VPHN) &&
+	    get_lppaca()->shared_proc) {
 		vphn_enabled = 1;
 		setup_cpu_associativity_change_counters();
 		init_timer_deferrable(&topology_timer);
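
Gating on get_lppaca()->shared_proc restricts the VPHN machinery to shared-processor partitions, presumably because that is where the hypervisor can actually move a virtual processor's home node; on dedicated-processor partitions the associativity does not change this way, so running the topology timer would be pointless. That rationale is our inference from the condition, not text from the patch.
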
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 5d3ea9f60dd7..ca5d5898d320 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -713,6 +713,13 @@ EXPORT_SYMBOL(arch_free_page);
 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
 extern long hcall_tracepoint_refcount;
 
+/*
+ * Since the tracing code might execute hcalls we need to guard against
+ * recursion. One example of this are spinlocks calling H_YIELD on
+ * shared processor partitions.
+ */
+static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
+
 void hcall_tracepoint_regfunc(void)
 {
 	hcall_tracepoint_refcount++;
@@ -725,12 +732,42 @@ void hcall_tracepoint_unregfunc(void)
 
 void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
 {
+	unsigned long flags;
+	unsigned int *depth;
+
+	local_irq_save(flags);
+
+	depth = &__get_cpu_var(hcall_trace_depth);
+
+	if (*depth)
+		goto out;
+
+	(*depth)++;
 	trace_hcall_entry(opcode, args);
+	(*depth)--;
+
+out:
+	local_irq_restore(flags);
 }
 
 void __trace_hcall_exit(long opcode, unsigned long retval,
 			unsigned long *retbuf)
 {
+	unsigned long flags;
+	unsigned int *depth;
+
+	local_irq_save(flags);
+
+	depth = &__get_cpu_var(hcall_trace_depth);
+
+	if (*depth)
+		goto out;
+
+	(*depth)++;
 	trace_hcall_exit(opcode, retval, retbuf);
+	(*depth)--;
+
+out:
+	local_irq_restore(flags);
 }
 #endif
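
The per-cpu depth counter added here is a general guard for instrumentation that can trigger itself, in this case spinlocks issuing H_YIELD hcalls from inside the hcall tracepoint. A self-contained model of the same pattern, with thread-local state standing in for the kernel's per-CPU variable and the IRQ disabling omitted:

    #include <stdio.h>

    /* Stand-in for DEFINE_PER_CPU(unsigned int, hcall_trace_depth):
     * one counter per execution context. */
    static _Thread_local unsigned int trace_depth;

    static void emit_trace(const char *what);

    /* An operation that is traced on entry; if tracing itself performs
     * such an operation, it would recurse forever without the guard. */
    static void traced_operation(const char *what)
    {
        emit_trace(what);
    }

    static void emit_trace(const char *what)
    {
        if (trace_depth)        /* already inside tracing: bail out */
            return;

        trace_depth++;
        printf("trace: %s\n", what);
        /* e.g. a lock taken here issuing H_YIELD would re-enter tracing,
         * but the depth check above turns that into a no-op. */
        traced_operation("nested hcall from inside tracing");
        trace_depth--;
    }

    int main(void)
    {
        traced_operation("H_GET_TERM_CHAR");    /* prints exactly once */
        return 0;
    }
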