From dc909d8b0c9c0d2c42dc1cf34216c4830f639f7b Mon Sep 17 00:00:00 2001
From: Srikar Dronamraju
Date: Wed, 29 Jan 2020 19:23:00 +0530
Subject: [PATCH] powerpc/numa: Early request for home node associativity

Currently the kernel detects if it is running on a shared lpar platform
and requests home node associativity before the scheduler sched_domains
are set up. However, between the time NUMA setup is initialized and the
request for home node associativity, the workqueue initializes its
per-node cpumask. The per-node workqueue possible cpumask may turn
invalid after home node associativity, resulting in weird situations
like the workqueue possible cpumask being a subset of the workqueue
online cpumask.

This can be fixed by requesting home node associativity earlier, just
before NUMA setup. However, at NUMA setup time the kernel may not be in
a position to detect if it is running on a shared lpar platform. So
request home node associativity and, if the request fails, fall back on
the device tree property.

Signed-off-by: Srikar Dronamraju
Reported-by: Abdul Haleem
Signed-off-by: Michael Ellerman
Link: https://lore.kernel.org/r/20200129135301.24739-5-srikar@linux.vnet.ibm.com
---
 arch/powerpc/mm/numa.c | 41 ++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 40 insertions(+), 1 deletion(-)

diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 518c6dbbccbe..5a8abf0165d7 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -461,6 +461,41 @@ static int of_drconf_to_nid_single(struct drmem_lmb *lmb)
 	return nid;
 }
 
+#ifdef CONFIG_PPC_SPLPAR
+static int vphn_get_nid(long lcpu)
+{
+	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
+	long rc, hwid;
+
+	/*
+	 * On a shared lpar, device tree will not have node associativity.
+	 * At this time lppaca, or its __old_status field may not be
+	 * updated. Hence kernel cannot detect if its on a shared lpar. So
+	 * request an explicit associativity irrespective of whether the
+	 * lpar is shared or dedicated. Use the device tree property as a
+	 * fallback. cpu_to_phys_id is only valid between
+	 * smp_setup_cpu_maps() and smp_setup_pacas().
+	 */
+	if (firmware_has_feature(FW_FEATURE_VPHN)) {
+		if (cpu_to_phys_id)
+			hwid = cpu_to_phys_id[lcpu];
+		else
+			hwid = get_hard_smp_processor_id(lcpu);
+
+		rc = hcall_vphn(hwid, VPHN_FLAG_VCPU, associativity);
+		if (rc == H_SUCCESS)
+			return associativity_to_nid(associativity);
+	}
+
+	return NUMA_NO_NODE;
+}
+#else
+static int vphn_get_nid(long unused)
+{
+	return NUMA_NO_NODE;
+}
+#endif /* CONFIG_PPC_SPLPAR */
+
 /*
  * Figure out to which domain a cpu belongs and stick it there.
  * Return the id of the domain used.
@@ -485,6 +520,10 @@ static int numa_setup_cpu(unsigned long lcpu)
 		return nid;
 	}
 
+	nid = vphn_get_nid(lcpu);
+	if (nid != NUMA_NO_NODE)
+		goto out_present;
+
 	cpu = of_get_cpu_node(lcpu, NULL);
 
 	if (!cpu) {
@@ -496,6 +535,7 @@ static int numa_setup_cpu(unsigned long lcpu)
 	}
 
 	nid = of_node_to_nid_single(cpu);
+	of_node_put(cpu);
 
 out_present:
 	if (nid < 0 || !node_possible(nid))
@@ -515,7 +555,6 @@ static int numa_setup_cpu(unsigned long lcpu)
 	}
 
 	map_cpu_to_node(lcpu, nid);
-	of_node_put(cpu);
 out:
 	return nid;
 }
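
For reference, the sketch below condenses the lookup order the patch establishes in numa_setup_cpu(): try the VPHN request first and fall back to the device tree mapping when it returns NUMA_NO_NODE. This is a minimal, standalone user-space C illustration, not kernel code; the names vphn_get_nid_stub, of_node_to_nid_stub and numa_setup_cpu_sketch are invented for this sketch, and the firmware and device-tree lookups are replaced by canned results.

/*
 * Standalone sketch (not part of the patch above) of the fallback order
 * introduced by this change: firmware (VPHN) first, device tree second.
 * All kernel interfaces are stubbed out purely for illustration.
 */
#include <stdio.h>

#define NUMA_NO_NODE (-1)

/* Stub: pretend the VPHN request failed (e.g. a dedicated lpar). */
static int vphn_get_nid_stub(long lcpu)
{
	(void)lcpu;
	return NUMA_NO_NODE;
}

/* Stub: pretend the device tree maps every cpu to node 0. */
static int of_node_to_nid_stub(long lcpu)
{
	(void)lcpu;
	return 0;
}

/* Mirrors the ordering in numa_setup_cpu() after the patch. */
static int numa_setup_cpu_sketch(long lcpu)
{
	int nid = vphn_get_nid_stub(lcpu);	/* firmware first */

	if (nid == NUMA_NO_NODE)
		nid = of_node_to_nid_stub(lcpu);	/* device tree fallback */

	return nid;
}

int main(void)
{
	printf("cpu 0 -> node %d\n", numa_setup_cpu_sketch(0));
	return 0;
}

With the VPHN stub failing, the device-tree value is used, which mirrors the fallback path described in the commit message.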