mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-11 21:46:46 +07:00
143e1e28cb
We replace the old way to configure the scheduler topology with a new method which enables a platform to declare additional levels (if needed). We still have a default topology table definition that can be used by platforms that don't want more levels than the SMT, MC, CPU and NUMA ones. This table can be overwritten by an arch which either wants to add new levels where load balancing makes sense, like a BOOK or powergating level, or wants to change the flags configuration of some levels. For each level, we need a function pointer that returns the cpumask for each cpu, a function pointer that returns the flags for the level and a name. Only flags that describe topology can be set by an architecture. The current topology flags are: SD_SHARE_CPUPOWER SD_SHARE_PKG_RESOURCES SD_NUMA SD_ASYM_PACKING Then, each level must be a subset of the next one. The build sequence of the sched_domain will take care of removing useless levels like those with 1 CPU and those with the same CPU span and no more relevant information for load balancing than their children. Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org> Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com> Reviewed-by: Preeti U Murthy <preeti@linux.vnet.ibm.com> Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org> Cc: Bjorn Helgaas <bhelgaas@google.com> Cc: Chris Metcalf <cmetcalf@tilera.com> Cc: Christoph Lameter <cl@linux.com> Cc: David S. 
Miller <davem@davemloft.net> Cc: Fenghua Yu <fenghua.yu@intel.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Hanjun Guo <hanjun.guo@linaro.org> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Jason Low <jason.low2@hp.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Martin Schwidefsky <schwidefsky@de.ibm.com> Cc: Tony Luck <tony.luck@intel.com> Cc: linux390@de.ibm.com Cc: linux-ia64@vger.kernel.org Cc: linux-s390@vger.kernel.org Link: http://lkml.kernel.org/r/1397209481-28542-2-git-send-email-vincent.guittot@linaro.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
197 lines
4.5 KiB
C
197 lines
4.5 KiB
C
/*
 * include/linux/topology.h
 *
 * Written by: Matthew Dobson, IBM Corporation
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send feedback to <colpatch@us.ibm.com>
 */
#ifndef _LINUX_TOPOLOGY_H
|
|
#define _LINUX_TOPOLOGY_H
|
|
|
|
#include <linux/cpumask.h>
|
|
#include <linux/bitops.h>
|
|
#include <linux/mmzone.h>
|
|
#include <linux/smp.h>
|
|
#include <linux/percpu.h>
|
|
#include <asm/topology.h>
|
|
|
|
#ifndef node_has_online_mem
|
|
#define node_has_online_mem(nid) (1)
|
|
#endif
|
|
|
|
#ifndef nr_cpus_node
|
|
#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node))
|
|
#endif
|
|
|
|
#define for_each_node_with_cpus(node) \
|
|
for_each_online_node(node) \
|
|
if (nr_cpus_node(node))
|
|
|
|
int arch_update_cpu_topology(void);
|
|
|
|
/* Conform to ACPI 2.0 SLIT distance definitions */
|
|
#define LOCAL_DISTANCE 10
|
|
#define REMOTE_DISTANCE 20
|
|
#ifndef node_distance
|
|
#define node_distance(from,to) ((from) == (to) ? LOCAL_DISTANCE : REMOTE_DISTANCE)
|
|
#endif
|
|
#ifndef RECLAIM_DISTANCE
|
|
/*
|
|
* If the distance between nodes in a system is larger than RECLAIM_DISTANCE
|
|
* (in whatever arch specific measurement units returned by node_distance())
|
|
* then switch on zone reclaim on boot.
|
|
*/
|
|
#define RECLAIM_DISTANCE 30
|
|
#endif
|
|
#ifndef PENALTY_FOR_NODE_WITH_CPUS
|
|
#define PENALTY_FOR_NODE_WITH_CPUS (1)
|
|
#endif
|
|
|
|
#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
|
|
DECLARE_PER_CPU(int, numa_node);
|
|
|
|
#ifndef numa_node_id
|
|
/* Returns the number of the current Node. */
|
|
static inline int numa_node_id(void)
|
|
{
|
|
return raw_cpu_read(numa_node);
|
|
}
|
|
#endif
|
|
|
|
#ifndef cpu_to_node
|
|
static inline int cpu_to_node(int cpu)
|
|
{
|
|
return per_cpu(numa_node, cpu);
|
|
}
|
|
#endif
|
|
|
|
#ifndef set_numa_node
|
|
static inline void set_numa_node(int node)
|
|
{
|
|
this_cpu_write(numa_node, node);
|
|
}
|
|
#endif
|
|
|
|
#ifndef set_cpu_numa_node
|
|
static inline void set_cpu_numa_node(int cpu, int node)
|
|
{
|
|
per_cpu(numa_node, cpu) = node;
|
|
}
|
|
#endif
|
|
|
|
#else /* !CONFIG_USE_PERCPU_NUMA_NODE_ID */
|
|
|
|
/* Returns the number of the current Node. */
|
|
#ifndef numa_node_id
|
|
static inline int numa_node_id(void)
|
|
{
|
|
return cpu_to_node(raw_smp_processor_id());
|
|
}
|
|
#endif
|
|
|
|
#endif /* [!]CONFIG_USE_PERCPU_NUMA_NODE_ID */
|
|
|
|
#ifdef CONFIG_HAVE_MEMORYLESS_NODES
|
|
|
|
/*
|
|
* N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
|
|
* It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
|
|
* Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem().
|
|
*/
|
|
DECLARE_PER_CPU(int, _numa_mem_);
|
|
|
|
#ifndef set_numa_mem
|
|
static inline void set_numa_mem(int node)
|
|
{
|
|
this_cpu_write(_numa_mem_, node);
|
|
}
|
|
#endif
|
|
|
|
#ifndef numa_mem_id
|
|
/* Returns the number of the nearest Node with memory */
|
|
static inline int numa_mem_id(void)
|
|
{
|
|
return raw_cpu_read(_numa_mem_);
|
|
}
|
|
#endif
|
|
|
|
#ifndef cpu_to_mem
|
|
static inline int cpu_to_mem(int cpu)
|
|
{
|
|
return per_cpu(_numa_mem_, cpu);
|
|
}
|
|
#endif
|
|
|
|
#ifndef set_cpu_numa_mem
|
|
static inline void set_cpu_numa_mem(int cpu, int node)
|
|
{
|
|
per_cpu(_numa_mem_, cpu) = node;
|
|
}
|
|
#endif
|
|
|
|
#else /* !CONFIG_HAVE_MEMORYLESS_NODES */
|
|
|
|
#ifndef numa_mem_id
|
|
/* Returns the number of the nearest Node with memory */
|
|
static inline int numa_mem_id(void)
|
|
{
|
|
return numa_node_id();
|
|
}
|
|
#endif
|
|
|
|
#ifndef cpu_to_mem
|
|
static inline int cpu_to_mem(int cpu)
|
|
{
|
|
return cpu_to_node(cpu);
|
|
}
|
|
#endif
|
|
|
|
#endif /* [!]CONFIG_HAVE_MEMORYLESS_NODES */
|
|
|
|
#ifndef topology_physical_package_id
|
|
#define topology_physical_package_id(cpu) ((void)(cpu), -1)
|
|
#endif
|
|
#ifndef topology_core_id
|
|
#define topology_core_id(cpu) ((void)(cpu), 0)
|
|
#endif
|
|
#ifndef topology_thread_cpumask
|
|
#define topology_thread_cpumask(cpu) cpumask_of(cpu)
|
|
#endif
|
|
#ifndef topology_core_cpumask
|
|
#define topology_core_cpumask(cpu) cpumask_of(cpu)
|
|
#endif
|
|
|
|
#ifdef CONFIG_SCHED_SMT
|
|
static inline const struct cpumask *cpu_smt_mask(int cpu)
|
|
{
|
|
return topology_thread_cpumask(cpu);
|
|
}
|
|
#endif
|
|
|
|
static inline const struct cpumask *cpu_cpu_mask(int cpu)
|
|
{
|
|
return cpumask_of_node(cpu_to_node(cpu));
|
|
}
|
|
|
|
|
|
#endif /* _LINUX_TOPOLOGY_H */
|