From 81c386cc7f4c22b81ba94709d2d58754282ea05e Mon Sep 17 00:00:00 2001
From: Scott Wood
Date: Thu, 27 Jan 2011 10:31:38 +0000
Subject: powerpc: Fix pfn_valid() when memory starts at a non-zero address

max_mapnr is a pfn, not an index into mem_map[], so don't add
ARCH_PFN_OFFSET a second time.

Signed-off-by: Scott Wood
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/page.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 53b64be..da4b200 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -101,7 +101,7 @@ extern phys_addr_t kernstart_addr;

 #ifdef CONFIG_FLATMEM
 #define ARCH_PFN_OFFSET		(MEMORY_START >> PAGE_SHIFT)
-#define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr))
+#define pfn_valid(pfn)		((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)
 #endif

 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
--
cgit v1.1
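To make the off-by-offset concrete, here is a minimal FLATMEM sketch with
made-up numbers (the 0x20000000 start address and 256 MB size are
hypothetical; only the shape of the check comes from the patch):

	/* Hypothetical layout: RAM starts at 0x20000000 with 4K pages,
	 * and the machine has 256 MB (0x10000 pages) of memory. */
	#define PAGE_SHIFT	12
	#define MEMORY_START	0x20000000UL
	#define ARCH_PFN_OFFSET	(MEMORY_START >> PAGE_SHIFT)	/* 0x20000 */

	static unsigned long max_mapnr = 0x30000;	/* one past the last pfn */

	/* The old test compared against ARCH_PFN_OFFSET + max_mapnr == 0x50000,
	 * wrongly accepting pfns 0x30000..0x4ffff that have no struct page
	 * behind them. max_mapnr is already a pfn, so compare it directly: */
	#define pfn_valid(pfn)	((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr)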
From b51cbd41a3f05fb374420dac003deb401a7cd137 Mon Sep 17 00:00:00 2001
From: Scott Wood
Date: Thu, 27 Jan 2011 10:30:00 +0000
Subject: powerpc/book3e: Protect complex macro args in mmu-book3e.h

Signed-off-by: Scott Wood
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/include/asm/mmu-book3e.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 8eaed81..17194fc 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -40,8 +40,8 @@

 /* MAS registers bit definitions */

-#define MAS0_TLBSEL(x)		((x << 28) & 0x30000000)
-#define MAS0_ESEL(x)		((x << 16) & 0x0FFF0000)
+#define MAS0_TLBSEL(x)		(((x) << 28) & 0x30000000)
+#define MAS0_ESEL(x)		(((x) << 16) & 0x0FFF0000)
 #define MAS0_NV(x)		((x) & 0x00000FFF)
 #define MAS0_HES		0x00004000
 #define MAS0_WQ_ALLWAYS	0x00000000
@@ -50,12 +50,12 @@

 #define MAS1_VALID		0x80000000
 #define MAS1_IPROT		0x40000000
-#define MAS1_TID(x)		((x << 16) & 0x3FFF0000)
+#define MAS1_TID(x)		(((x) << 16) & 0x3FFF0000)
 #define MAS1_IND		0x00002000
 #define MAS1_TS		0x00001000
 #define MAS1_TSIZE_MASK	0x00000f80
 #define MAS1_TSIZE_SHIFT	7
-#define MAS1_TSIZE(x)		((x << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)
+#define MAS1_TSIZE(x)		(((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK)

 #define MAS2_EPN		0xFFFFF000
 #define MAS2_X0		0x00000040
--
cgit v1.1
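The added parentheses matter whenever a caller passes a compound
expression. A standalone C sketch (the _OLD/_NEW names are illustrative,
not kernel macros):

	#include <stdio.h>

	/* Old definition: the argument is substituted unparenthesized */
	#define MAS0_TLBSEL_OLD(x)	((x << 28) & 0x30000000)
	/* Fixed definition from the patch */
	#define MAS0_TLBSEL_NEW(x)	(((x) << 28) & 0x30000000)

	int main(void)
	{
		unsigned int sel = 5;

		/* Old: expands to ((sel & 3 << 28) & 0x30000000). Since <<
		 * binds tighter than &, that is (sel & (3 << 28)) & 0x30000000,
		 * which is 0 here instead of the intended TLB select. */
		printf("old: %#010x\n", MAS0_TLBSEL_OLD(sel & 3));	/* 0x00000000 */

		/* New: (((sel & 3) << 28) & 0x30000000) == 0x10000000 */
		printf("new: %#010x\n", MAS0_TLBSEL_NEW(sel & 3));	/* 0x10000000 */
		return 0;
	}

The same hazard applies to MAS0_ESEL(), MAS1_TID() and MAS1_TSIZE(), which
is why the patch wraps all of their arguments.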
From af9eef3c7b1ed004c378c89b87642f4937337d50 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Thu, 20 Jan 2011 20:36:03 +0000
Subject: powerpc: Pass the right cpu_spec to ->setup_cpu() on 64-bit

When calling setup_cpu() on 64-bit, we pass a pointer to the cputable
entry we have found. This used to be fine when cur_cpu_spec was a pointer
to that entry, but nowadays we copy the entry into a separate variable,
and we do so before we call the setup_cpu() callback. That means that any
attempt by that callback to patch the CPU table entry (to adjust CPU
features, for example) will patch the wrong table.

Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/kernel/cputable.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 8d74a24..e8e915c 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2076,8 +2076,8 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s)
	 * pointer on ppc64 and booke as we are running at 0 in real mode
	 * on ppc64 and reloc_offset is always 0 on booke.
	 */
-	if (s->cpu_setup) {
-		s->cpu_setup(offset, s);
+	if (t->cpu_setup) {
+		t->cpu_setup(offset, t);
	}
 #endif /* CONFIG_PPC64 || CONFIG_BOOKE */
 }
--
cgit v1.1
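A cut-down C sketch of the bug class (the struct and callback below are
simplified stand-ins for the kernel's cpu_spec machinery, not its real
definitions):

	/* Simplified stand-ins, not the kernel's definitions */
	struct cpu_spec {
		unsigned long cpu_features;
		void (*cpu_setup)(unsigned long offset, struct cpu_spec *spec);
	};

	static struct cpu_spec the_cpu_spec;	/* what cur_cpu_spec points to */

	static void setup_cpu_spec(unsigned long offset, struct cpu_spec *s)
	{
		struct cpu_spec *t = &the_cpu_spec;

		*t = *s;	/* the entry is copied out of the table first... */

		/* ...so the callback must patch the copy. Passing "s" here
		 * would let it adjust cpu_features in the lookup table while
		 * the kernel keeps using the unpatched copy in "t". */
		if (t->cpu_setup)
			t->cpu_setup(offset, t);
	}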
From 1f1936ff3febf38d582177ea319eaa278f32c91f Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt
Date: Thu, 20 Jan 2011 20:35:23 +0000
Subject: powerpc: Fix some 6xx/7xxx CPU setup functions

Some of those functions try to adjust the CPU features, for example to
remove NAP support on some revisions. However, they seem to use r5 as an
index into the CPU table entry, which might have been right a long time
ago but no longer is. r4 is the right register to use. This probably
caused some odd behaviours on some PowerMac variants using 750cx or 7455
processor revisions.

Signed-off-by: Benjamin Herrenschmidt
CC: stable@kernel.org
---
 arch/powerpc/kernel/cpu_setup_6xx.S | 40 ++++++++++++++++++-------------------
 1 file changed, 20 insertions(+), 20 deletions(-)

diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S
index 55cba4a..f8cd9fb 100644
--- a/arch/powerpc/kernel/cpu_setup_6xx.S
+++ b/arch/powerpc/kernel/cpu_setup_6xx.S
@@ -18,7 +18,7 @@
 #include

 _GLOBAL(__setup_cpu_603)
-	mflr	r4
+	mflr	r5
 BEGIN_MMU_FTR_SECTION
	li	r10,0
	mtspr	SPRN_SPRG_603_LRU,r10	/* init SW LRU tracking */
@@ -27,60 +27,60 @@
 BEGIN_FTR_SECTION
	bl	__init_fpu_registers
 END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE)
	bl	setup_common_caches
-	mtlr	r4
+	mtlr	r5
	blr
 _GLOBAL(__setup_cpu_604)
-	mflr	r4
+	mflr	r5
	bl	setup_common_caches
	bl	setup_604_hid0
-	mtlr	r4
+	mtlr	r5
	blr
 _GLOBAL(__setup_cpu_750)
-	mflr	r4
+	mflr	r5
	bl	__init_fpu_registers
	bl	setup_common_caches
	bl	setup_750_7400_hid0
-	mtlr	r4
+	mtlr	r5
	blr
 _GLOBAL(__setup_cpu_750cx)
-	mflr	r4
+	mflr	r5
	bl	__init_fpu_registers
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	bl	setup_750cx
-	mtlr	r4
+	mtlr	r5
	blr
 _GLOBAL(__setup_cpu_750fx)
-	mflr	r4
+	mflr	r5
	bl	__init_fpu_registers
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	bl	setup_750fx
-	mtlr	r4
+	mtlr	r5
	blr
 _GLOBAL(__setup_cpu_7400)
-	mflr	r4
+	mflr	r5
	bl	__init_fpu_registers
	bl	setup_7400_workarounds
	bl	setup_common_caches
	bl	setup_750_7400_hid0
-	mtlr	r4
+	mtlr	r5
	blr
 _GLOBAL(__setup_cpu_7410)
-	mflr	r4
+	mflr	r5
	bl	__init_fpu_registers
	bl	setup_7410_workarounds
	bl	setup_common_caches
	bl	setup_750_7400_hid0
	li	r3,0
	mtspr	SPRN_L2CR2,r3
-	mtlr	r4
+	mtlr	r5
	blr
 _GLOBAL(__setup_cpu_745x)
-	mflr	r4
+	mflr	r5
	bl	setup_common_caches
	bl	setup_745x_specifics
-	mtlr	r4
+	mtlr	r5
	blr

 /* Enable caches for 603's, 604, 750 & 7400 */
@@ -194,10 +194,10 @@ setup_750cx:
	cror	4*cr0+eq,4*cr0+eq,4*cr1+eq
	cror	4*cr0+eq,4*cr0+eq,4*cr2+eq
	bnelr
-	lwz	r6,CPU_SPEC_FEATURES(r5)
+	lwz	r6,CPU_SPEC_FEATURES(r4)
	li	r7,CPU_FTR_CAN_NAP
	andc	r6,r6,r7
-	stw	r6,CPU_SPEC_FEATURES(r5)
+	stw	r6,CPU_SPEC_FEATURES(r4)
	blr

 /* 750fx specific
@@ -225,12 +225,12 @@
 BEGIN_FTR_SECTION
	andis.	r11,r11,L3CR_L3E@h
	beq	1f
 END_FTR_SECTION_IFSET(CPU_FTR_L3CR)
-	lwz	r6,CPU_SPEC_FEATURES(r5)
+	lwz	r6,CPU_SPEC_FEATURES(r4)
	andi.	r0,r6,CPU_FTR_L3_DISABLE_NAP
	beq	1f
	li	r7,CPU_FTR_CAN_NAP
	andc	r6,r6,r7
-	stw	r6,CPU_SPEC_FEATURES(r5)
+	stw	r6,CPU_SPEC_FEATURES(r4)
 1:
	mfspr	r11,SPRN_HID0
--
cgit v1.1

From bd03403ad5d789f75974985b698cffcd23ef9346 Mon Sep 17 00:00:00 2001
From: Jesse Larrew
Date: Thu, 20 Jan 2011 19:00:51 +0000
Subject: powerpc/pseries: Fix typo in VPHN comments

Correct a spelling error in the VPHN comments in numa.c.

Signed-off-by: Jesse Larrew
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/mm/numa.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index bf5cb91..4d7f9e7 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1289,7 +1289,7 @@ u64 memory_hotplug_max(void)
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */

-/* Vrtual Processor Home Node (VPHN) support */
+/* Virtual Processor Home Node (VPHN) support */
 #ifdef CONFIG_PPC_SPLPAR
 #define VPHN_NR_CHANGE_CTRS (8)
 static u8 vphn_cpu_change_counts[NR_CPUS][VPHN_NR_CHANGE_CTRS];
--
cgit v1.1

From 7639adaafbfe988d814d45181389fac7697d844e Mon Sep 17 00:00:00 2001
From: Jesse Larrew
Date: Thu, 20 Jan 2011 19:01:13 +0000
Subject: powerpc/pseries: Fix brace placement in numa.c

Fix brace placement in the VPHN code.

Signed-off-by: Jesse Larrew
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/mm/numa.c | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 4d7f9e7..6c0cd06 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1310,9 +1310,8 @@ static void setup_cpu_associativity_change_counters(void)
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

-		for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++) {
+		for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++)
			counts[i] = hypervisor_counts[i];
-		}
	}
 }
@@ -1379,14 +1378,12 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
		 */
		unpacked[i] = *((u32*)field);
		field += 2;
-	}
-	else if (*field & VPHN_FIELD_MSB) {
+	} else if (*field & VPHN_FIELD_MSB) {
		/* Data is in the lower 15 bits of this field */
		unpacked[i] = *field & VPHN_FIELD_MASK;
		field++;
		nr_assoc_doms++;
-	}
-	else {
+	} else {
		/* Data is in the lower 15 bits of this field
		 * concatenated with the next 16 bit field
		 */
--
cgit v1.1

From cd9d6cc7266ca7f3ad9bacb3262a0fda38f13c6f Mon Sep 17 00:00:00 2001
From: Jesse Larrew
Date: Thu, 20 Jan 2011 19:01:35 +0000
Subject: powerpc/pseries: Remove unnecessary variable initializations in numa.c

Remove unnecessary variable initializations in the VPHN functions.

Signed-off-by: Jesse Larrew
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/mm/numa.c | 17 ++++++++---------
 1 file changed, 8 insertions(+), 9 deletions(-)

diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 6c0cd06..9e36caf 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1303,10 +1303,10 @@ static void set_topology_timer(void);
 */
 static void setup_cpu_associativity_change_counters(void)
 {
-	int cpu = 0;
+	int cpu;

	for_each_possible_cpu(cpu) {
-		int i = 0;
+		int i;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;
@@ -1328,7 +1328,7 @@ static void setup_cpu_associativity_change_counters(void)
 */
 static int update_cpu_associativity_changes_mask(void)
 {
-	int cpu = 0, nr_cpus = 0;
+	int cpu, nr_cpus = 0;
	cpumask_t *changes = &cpu_associativity_changes_mask;

	cpumask_clear(changes);
@@ -1362,8 +1362,7 @@ static int update_cpu_associativity_changes_mask(void)
 */
 static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
 {
-	int i = 0;
-	int nr_assoc_doms = 0;
+	int i, nr_assoc_doms = 0;
	const u16 *field = (const u16*) packed;

 #define VPHN_FIELD_UNUSED	(0xffff)
@@ -1402,7 +1401,7 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
 */
 static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
 {
-	long rc = 0;
+	long rc;
	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
	u64 flags = 1;
	int hwcpu = get_hard_smp_processor_id(cpu);
@@ -1416,7 +1415,7 @@ static long hcall_vphn(unsigned long cpu, unsigned int *associativity)
 static long vphn_get_associativity(unsigned long cpu,
					unsigned int *associativity)
 {
-	long rc = 0;
+	long rc;

	rc = hcall_vphn(cpu, associativity);
@@ -1442,9 +1441,9 @@ static long vphn_get_associativity(unsigned long cpu,
 */
 int arch_update_cpu_topology(void)
 {
-	int cpu = 0, nid = 0, old_nid = 0;
+	int cpu, nid, old_nid;
	unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
-	struct sys_device *sysdev = NULL;
+	struct sys_device *sysdev;

	for_each_cpu_mask(cpu, cpu_associativity_changes_mask) {
		vphn_get_associativity(cpu, associativity);
--
cgit v1.1

From 5de1669910a59025e6cf24baef242a6c264d5752 Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Sat, 29 Jan 2011 12:24:34 +0000
Subject: powerpc/numa: Only use active VPHN count fields

VPHN supports up to 8 distance fields, but the number of entries in
ibm,associativity-reference-points signifies how many are in use, so
don't look at all the VPHN counts, only distance_ref_points_depth worth.

Since we already cap our distance metrics at MAX_DISTANCE_REF_POINTS,
use that to size the VPHN arrays, and add a BUILD_BUG_ON to keep it from
growing larger than the VPHN maximum of 8.

Signed-off-by: Anton Blanchard
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/mm/numa.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 9e36caf..f25633d 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1291,8 +1291,7 @@ u64 memory_hotplug_max(void)

 /* Virtual Processor Home Node (VPHN) support */
 #ifdef CONFIG_PPC_SPLPAR
-#define VPHN_NR_CHANGE_CTRS (8)
-static u8 vphn_cpu_change_counts[NR_CPUS][VPHN_NR_CHANGE_CTRS];
+static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
 static cpumask_t cpu_associativity_changes_mask;
 static int vphn_enabled;
 static void set_topology_timer(void);
@@ -1305,12 +1304,15 @@ static void setup_cpu_associativity_change_counters(void)
 {
	int cpu;

+	/* The VPHN feature supports a maximum of 8 reference points */
+	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);
+
	for_each_possible_cpu(cpu) {
		int i;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

-		for (i = 0; i < VPHN_NR_CHANGE_CTRS; i++)
+		for (i = 0; i < distance_ref_points_depth; i++)
			counts[i] = hypervisor_counts[i];
	}
 }
--
cgit v1.1
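The BUILD_BUG_ON() idiom used above, shown in isolation with hypothetical
sizes (the kernel's actual MAX_DISTANCE_REF_POINTS value may differ, and
the macro below is a minimal stand-in for the kernel's version):

	/* A true condition yields a negative array size: compile error */
	#define BUILD_BUG_ON(cond)	((void)sizeof(char[1 - 2 * !!(cond)]))

	#define VPHN_MAX_REF_POINTS	8	/* fixed by the VPHN interface */
	#define MAX_DISTANCE_REF_POINTS	4	/* hypothetical platform cap   */

	static unsigned char counts[64][MAX_DISTANCE_REF_POINTS];

	static void check_sizing(void)
	{
		/* Build fails if the array is ever sized beyond what the
		 * hypervisor's 8 change counters can fill. */
		BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > VPHN_MAX_REF_POINTS);
	}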
From d69043e8069f493ebee9472bcc78b3f54c5c27d9 Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Sat, 29 Jan 2011 12:26:19 +0000
Subject: powerpc/numa: Check for all VPHN changes

The hypervisor uses unsigned 1 byte counters to signal topology changes
to the OS. Since they can wrap, we need to check for any difference, not
just whether the hypervisor count is greater than the previous count.

Signed-off-by: Anton Blanchard
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/mm/numa.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index f25633d..b2937ef 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1341,7 +1341,7 @@ static int update_cpu_associativity_changes_mask(void)
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++) {
-			if (hypervisor_counts[i] > counts[i]) {
+			if (hypervisor_counts[i] != counts[i]) {
				counts[i] = hypervisor_counts[i];
				changed = 1;
			}
--
cgit v1.1
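A standalone demonstration of why the comparison has to be "!=" (the
counter values are arbitrary):

	#include <stdio.h>

	int main(void)
	{
		unsigned char saved = 250;			/* last count the OS saw */
		unsigned char hv = (unsigned char)(250 + 10);	/* wrapped around to 4   */

		printf("hv > saved:  %d\n", hv > saved);	/* 0: change missed */
		printf("hv != saved: %d\n", hv != saved);	/* 1: change seen   */
		return 0;
	}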
From c0e5e46f3911a451b6915feda709fd1b9b7f026a Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Sat, 29 Jan 2011 12:28:04 +0000
Subject: powerpc/numa: Add length when creating OF properties via VPHN

The rest of the NUMA code expects an OF associativity property with the
first cell containing the length. Without this fix, every topology change
causes us to misparse the property and put the cpu into node 0.

Signed-off-by: Anton Blanchard
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/mm/numa.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index b2937ef..d07cd08 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1355,8 +1355,11 @@ static int update_cpu_associativity_changes_mask(void)
	return nr_cpus;
 }

-/* 6 64-bit registers unpacked into 12 32-bit associativity values */
-#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32))
+/*
+ * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
+ * the complete property we have to add the length in the first cell.
+ */
+#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)

 /*
 * Convert the associativity domain numbers returned from the hypervisor
@@ -1371,7 +1374,7 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
 #define VPHN_FIELD_MSB		(0x8000)
 #define VPHN_FIELD_MASK	(~VPHN_FIELD_MSB)

-	for (i = 0; i < VPHN_ASSOC_BUFSIZE; i++) {
+	for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
		if (*field == VPHN_FIELD_UNUSED) {
			/* All significant fields processed, and remaining
			 * fields contain the reserved value of all 1's.
@@ -1394,6 +1397,9 @@ static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked)
		}
	}

+	/* The first cell contains the length of the property */
+	unpacked[0] = nr_assoc_doms;
+
	return nr_assoc_doms;
 }
--
cgit v1.1
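For reference, the shape of the buffer the unpack routine now produces,
with made-up domain numbers (only the length-in-cell-0 layout comes from
the patch):

	/* VPHN_ASSOC_BUFSIZE is now 6*8/4 + 1 == 13 cells. Cell 0 carries
	 * the domain count, which is why the loop starts at i = 1 and why
	 * unpacked[0] = nr_assoc_doms is stored at the end. */
	unsigned int unpacked[13] = {
		3,	/* cell 0: number of associativity domains that follow */
		0x2,	/* cell 1: outermost (most significant) domain */
		0x11,	/* cell 2: intermediate domain */
		0x9,	/* cell 3: the cpu's home node */
		/* cells 4..12 unused at this depth */
	};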
From fe5cfd63557b39007460d17c585b8dc5ed6ace93 Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Sat, 29 Jan 2011 12:35:22 +0000
Subject: powerpc/numa: Disable VPHN on dedicated processor partitions

There is no need to start the timer and monitor topology changes on a
dedicated processor partition, so disable VPHN there.

Signed-off-by: Anton Blanchard
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/mm/numa.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index d07cd08..3cf3349 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1516,7 +1516,8 @@ int start_topology_update(void)
 {
	int rc = 0;

-	if (firmware_has_feature(FW_FEATURE_VPHN)) {
+	if (firmware_has_feature(FW_FEATURE_VPHN) &&
+	    get_lppaca()->shared_proc) {
		vphn_enabled = 1;
		setup_cpu_associativity_change_counters();
		init_timer_deferrable(&topology_timer);
--
cgit v1.1

From 429f4d8d20b91e4a6c239f951c06a56a6ac22957 Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Sat, 29 Jan 2011 12:37:16 +0000
Subject: powerpc/numa: Fix bug in unmap_cpu_from_node

When converting to the new cpumask code I screwed up:

-	if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
-		cpu_clear(cpu, numa_cpumask_lookup_table[node]);
+	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
+		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);

This was introduced in commit 25863de07af9 (powerpc/cpumask: Convert
NUMA code to new cpumask API). Fix it.

Signed-off-by: Anton Blanchard
Cc:
Signed-off-by: Benjamin Herrenschmidt
---
 arch/powerpc/mm/numa.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 3cf3349..fd48123 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -186,7 +186,7 @@ static void unmap_cpu_from_node(unsigned long cpu)
	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
-		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
+		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
			 cpu, node);
--
cgit v1.1

From 57cdfdf829a850a317425ed93c6a576c9ee6329c Mon Sep 17 00:00:00 2001
From: Anton Blanchard
Date: Thu, 21 Oct 2010 00:52:12 +0000
Subject: powerpc: Fix hcall tracepoint recursion

Spinlocks on shared processor partitions use H_YIELD to notify the
hypervisor that we are waiting on another virtual CPU. Unfortunately this
means the hcall tracepoints can recurse.

The patch below adds a per-cpu depth counter and checks it in both the
entry and exit hcall tracepoints.

Signed-off-by: Anton Blanchard
Acked-by: Steven Rostedt
Signed-off-by: Benjamin Herrenschmidt
CC: stable@kernel.org
---
 arch/powerpc/platforms/pseries/lpar.c | 37 +++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 5d3ea9f..ca5d589 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -713,6 +713,13 @@ EXPORT_SYMBOL(arch_free_page);
 /* NB: reg/unreg are called while guarded with the tracepoints_mutex */
 extern long hcall_tracepoint_refcount;

+/*
+ * Since the tracing code might execute hcalls we need to guard against
+ * recursion. One example of this are spinlocks calling H_YIELD on
+ * shared processor partitions.
+ */
+static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
+
 void hcall_tracepoint_regfunc(void)
 {
	hcall_tracepoint_refcount++;
@@ -725,12 +732,42 @@ void hcall_tracepoint_unregfunc(void)

 void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
 {
+	unsigned long flags;
+	unsigned int *depth;
+
+	local_irq_save(flags);
+
+	depth = &__get_cpu_var(hcall_trace_depth);
+
+	if (*depth)
+		goto out;
+
+	(*depth)++;
	trace_hcall_entry(opcode, args);
+	(*depth)--;
+
+out:
+	local_irq_restore(flags);
 }

 void __trace_hcall_exit(long opcode, unsigned long retval,
	unsigned long *retbuf)
 {
+	unsigned long flags;
+	unsigned int *depth;
+
+	local_irq_save(flags);
+
+	depth = &__get_cpu_var(hcall_trace_depth);
+
+	if (*depth)
+		goto out;
+
+	(*depth)++;
	trace_hcall_exit(opcode, retval, retbuf);
+	(*depth)--;
+
+out:
+	local_irq_restore(flags);
 }
 #endif
--
cgit v1.1
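The same guard pattern, reduced to a runnable userspace sketch: a GCC
__thread variable stands in for the kernel's per-cpu data, and a plain
printf for the tracepoint. The kernel version additionally disables
interrupts so an IRQ on the same CPU cannot corrupt the depth counter.

	#include <stdio.h>

	static __thread unsigned int trace_depth;

	static void do_trace(const char *what)
	{
		printf("trace: %s\n", what);
		/* In the real problem, work done here can re-enter the
		 * tracepoint, e.g. a traced hcall taken by a spinlock. */
	}

	static void trace_event(const char *what)
	{
		if (trace_depth)
			return;		/* re-entered: bail instead of recursing */

		trace_depth++;
		do_trace(what);
		trace_depth--;
	}

	int main(void)
	{
		trace_event("H_CEDE");
		return 0;
	}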