commit eb254f323b

Pull x86 cache allocation interface from Thomas Gleixner:
 "This provides support for Intel's Cache Allocation Technology, a
  cache partitioning mechanism.

  The interface is odd, but the hardware interface of that CAT stuff
  is odd as well. We tried hard to come up with an abstraction, but
  that only allows rather simple partitioning and gives no way of
  sharing or of dealing with the per-package nature of this mechanism.
  In the end we decided to expose the allocation bitmaps directly so
  that all combinations of the hardware can be utilized.

  There are two ways of associating a cache partition:

   - Task: a task can be added to a resource group. It uses the cache
     partition associated with the group.

   - CPU: all tasks which are not members of a resource group use the
     group to which the CPU they are running on is assigned. That
     allows for simple CPU-based partitioning schemes.

  The main expected users are:

   - Virtualization, so a VM can trash only the associated part of the
     cache w/o disturbing others

   - Real-Time systems, to separate RT and general workloads

   - Latency sensitive enterprise workloads

   - In theory this can also be used to protect against cache side
     channel attacks"

[ Intel RDT is "Resource Director Technology". The interface really is
  rather odd and very specific, which delayed this pull request while I
  was thinking about it. The pull request itself came in early during
  the merge window, I just delayed it until things had calmed down and
  I had more time.

  But people tell me they'll use this, and the good news is that it is
  _so_ specific that it's rather independent of anything else, and no
  user is going to depend on the interface since it's pretty rare. So
  if push comes to shove, we can just remove the interface and nothing
  will break ]

* 'x86-cache-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (31 commits)
  x86/intel_rdt: Implement show_options() for resctrlfs
  x86/intel_rdt: Call intel_rdt_sched_in() with preemption disabled
  x86/intel_rdt: Update task closid immediately on CPU in rmdir and unmount
  x86/intel_rdt: Fix setting of closid when adding CPUs to a group
  x86/intel_rdt: Update percpu closid immediately on CPUs affected by change
  x86/intel_rdt: Reset per cpu closids on unmount
  x86/intel_rdt: Select KERNFS when enabling INTEL_RDT_A
  x86/intel_rdt: Prevent deadlock against hotplug lock
  x86/intel_rdt: Protect info directory from removal
  x86/intel_rdt: Add info files to Documentation
  x86/intel_rdt: Export the minimum number of set mask bits in sysfs
  x86/intel_rdt: Propagate error in rdt_mount() properly
  x86/intel_rdt: Add a missing #include
  MAINTAINERS: Add maintainer for Intel RDT resource allocation
  x86/intel_rdt: Add scheduler hook
  x86/intel_rdt: Add schemata file
  x86/intel_rdt: Add tasks files
  x86/intel_rdt: Add cpus file
  x86/intel_rdt: Add mkdir to resctrl file system
  x86/intel_rdt: Add "info" files to resctrl file system
  ...
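To make the interface described above concrete, here is a minimal user-space sketch. It is not part of this kernel source: it assumes the resctrl filesystem is already mounted at /sys/fs/resctrl, and the group name rt_group, the PID, the CPU mask and the L3 bitmasks are invented example values. The tasks, cpus and schemata files are the ones added by the commits listed above.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>

/* Write a short string into a resctrl control file, aborting on error. */
static void write_file(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	if (fprintf(f, "%s", val) < 0 || fclose(f) == EOF) {
		perror(path);
		exit(EXIT_FAILURE);
	}
}

int main(void)
{
	/* A resource group is just a directory under the mount point. */
	if (mkdir("/sys/fs/resctrl/rt_group", 0755) && errno != EEXIST) {
		perror("mkdir");
		return EXIT_FAILURE;
	}

	/* Associate by task: move PID 1234 (example) into the group. */
	write_file("/sys/fs/resctrl/rt_group/tasks", "1234\n");

	/* Or associate by CPU: hex mask 0xc selects CPUs 2 and 3. */
	write_file("/sys/fs/resctrl/rt_group/cpus", "c\n");

	/* Restrict the group to part of L3: one bitmask per cache domain. */
	write_file("/sys/fs/resctrl/rt_group/schemata", "L3:0=f;1=f\n");

	return 0;
}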
663 lines
17 KiB
C
/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)	(&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)	(ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)	(ci_cacheinfo(cpu)->info_list)

struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
	return ci_cacheinfo(cpu);
}

#ifdef CONFIG_OF
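/*
 * Bind each cacheinfo leaf to a device tree node: level 1 caches are
 * described in the CPU node itself, while deeper levels are reached by
 * following the chain of next-level-cache phandles via
 * of_find_next_cache_node().
 */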
static int cache_setup_of_node(unsigned int cpu)
{
	struct device_node *np;
	struct cacheinfo *this_leaf;
	struct device *cpu_dev = get_cpu_device(cpu);
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	unsigned int index = 0;

	/* skip if of_node is already populated */
	if (this_cpu_ci->info_list->of_node)
		return 0;

	if (!cpu_dev) {
		pr_err("No cpu device for CPU %d\n", cpu);
		return -ENODEV;
	}
	np = cpu_dev->of_node;
	if (!np) {
		pr_err("Failed to find cpu%d device node\n", cpu);
		return -ENOENT;
	}

	while (index < cache_leaves(cpu)) {
		this_leaf = this_cpu_ci->info_list + index;
		if (this_leaf->level != 1)
			np = of_find_next_cache_node(np);
		else
			np = of_node_get(np);/* cpu node itself */
		if (!np)
			break;
		this_leaf->of_node = np;
		index++;
	}

	if (index != cache_leaves(cpu)) /* not all OF nodes populated */
		return -ENOENT;

	return 0;
}

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	return sib_leaf->of_node == this_leaf->of_node;
}

/* OF properties to query for a given cache type */
struct cache_type_info {
	const char *size_prop;
	const char *line_size_props[2];
	const char *nr_sets_prop;
};

static const struct cache_type_info cache_type_info[] = {
	{
		.size_prop       = "cache-size",
		.line_size_props = { "cache-line-size",
				     "cache-block-size", },
		.nr_sets_prop    = "cache-sets",
	}, {
		.size_prop       = "i-cache-size",
		.line_size_props = { "i-cache-line-size",
				     "i-cache-block-size", },
		.nr_sets_prop    = "i-cache-sets",
	}, {
		.size_prop       = "d-cache-size",
		.line_size_props = { "d-cache-line-size",
				     "d-cache-block-size", },
		.nr_sets_prop    = "d-cache-sets",
	},
};

static inline int get_cacheinfo_idx(enum cache_type type)
{
	if (type == CACHE_TYPE_UNIFIED)
		return 0;
	return type;
}

static void cache_size(struct cacheinfo *this_leaf)
{
	const char *propname;
	const __be32 *cache_size;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].size_prop;

	cache_size = of_get_property(this_leaf->of_node, propname, NULL);
	if (cache_size)
		this_leaf->size = of_read_number(cache_size, 1);
}

/* not cache_line_size() because that's a macro in include/linux/cache.h */
static void cache_get_line_size(struct cacheinfo *this_leaf)
{
	const __be32 *line_size;
	int i, lim, ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	lim = ARRAY_SIZE(cache_type_info[ct_idx].line_size_props);

	for (i = 0; i < lim; i++) {
		const char *propname;

		propname = cache_type_info[ct_idx].line_size_props[i];
		line_size = of_get_property(this_leaf->of_node, propname, NULL);
		if (line_size)
			break;
	}

	if (line_size)
		this_leaf->coherency_line_size = of_read_number(line_size, 1);
}

static void cache_nr_sets(struct cacheinfo *this_leaf)
{
	const char *propname;
	const __be32 *nr_sets;
	int ct_idx;

	ct_idx = get_cacheinfo_idx(this_leaf->type);
	propname = cache_type_info[ct_idx].nr_sets_prop;

	nr_sets = of_get_property(this_leaf->of_node, propname, NULL);
	if (nr_sets)
		this_leaf->number_of_sets = of_read_number(nr_sets, 1);
}
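
/*
 * Derive the ways of associativity from the three DT-provided values:
 * ways = (size / nr_sets) / line_size. Worked example (invented
 * numbers): a 32 KiB cache with 128 sets and 64-byte lines gives
 * (32768 / 128) / 64 = 4 ways. nr_sets == 1 denotes a fully
 * associative cache, which is left with ways_of_associativity == 0.
 */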
static void cache_associativity(struct cacheinfo *this_leaf)
{
	unsigned int line_size = this_leaf->coherency_line_size;
	unsigned int nr_sets = this_leaf->number_of_sets;
	unsigned int size = this_leaf->size;

	/*
	 * If the cache is fully associative, there is no need to
	 * check the other properties.
	 */
	if (!(nr_sets == 1) && (nr_sets > 0 && size > 0 && line_size > 0))
		this_leaf->ways_of_associativity = (size / nr_sets) / line_size;
}

static void cache_of_override_properties(unsigned int cpu)
{
	int index;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		cache_size(this_leaf);
		cache_get_line_size(this_leaf);
		cache_nr_sets(this_leaf);
		cache_associativity(this_leaf);
	}
}
#else
static void cache_of_override_properties(unsigned int cpu) { }
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
					   struct cacheinfo *sib_leaf)
{
	/*
	 * For non-DT systems, assume a unique level 1 cache and
	 * system-wide shared caches for all other levels. This will be
	 * used only if arch specific code has not populated
	 * shared_cpu_map.
	 */
	return !(this_leaf->level == 1);
}
#endif
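
/*
 * Keep shared_cpu_map symmetric: when a newly onlined CPU finds a leaf
 * it shares with an already-online sibling, it marks itself in the
 * sibling's mask and the sibling in its own, so neither side has to
 * rescan later.
 */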
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int index;
	int ret = 0;

	if (this_cpu_ci->cpu_map_populated)
		return 0;

	if (of_have_populated_dt())
		ret = cache_setup_of_node(cpu);
	else if (!acpi_disabled)
		/* No cache property/hierarchy support yet in ACPI */
		ret = -ENOTSUPP;
	if (ret)
		return ret;

	for (index = 0; index < cache_leaves(cpu); index++) {
		unsigned int i;

		this_leaf = this_cpu_ci->info_list + index;
		/* skip if shared_cpu_map is already populated */
		if (!cpumask_empty(&this_leaf->shared_cpu_map))
			continue;

		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		for_each_online_cpu(i) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue;/* skip if itself or no cacheinfo */
			sib_leaf = sib_cpu_ci->info_list + index;
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			}
		}
	}

	return 0;
}

static void cache_shared_cpu_map_remove(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index;

	for (index = 0; index < cache_leaves(cpu); index++) {
		this_leaf = this_cpu_ci->info_list + index;
		for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
			struct cpu_cacheinfo *sib_cpu_ci;

			if (sibling == cpu) /* skip itself */
				continue;

			sib_cpu_ci = get_cpu_cacheinfo(sibling);
			if (!sib_cpu_ci->info_list)
				continue;

			sib_leaf = sib_cpu_ci->info_list + index;
			cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
			cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
		}
		of_node_put(this_leaf->of_node);
	}
}

static void cache_override_properties(unsigned int cpu)
{
	if (of_have_populated_dt())
		return cache_of_override_properties(cpu);
}

static void free_cache_attributes(unsigned int cpu)
{
	if (!per_cpu_cacheinfo(cpu))
		return;

	cache_shared_cpu_map_remove(cpu);

	kfree(per_cpu_cacheinfo(cpu));
	per_cpu_cacheinfo(cpu) = NULL;
}

int __weak init_cache_level(unsigned int cpu)
{
	return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
	return -ENOENT;
}
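
/*
 * Hypothetical arch-side override of the two __weak hooks above
 * (sketch only, invented for illustration): describe one unified,
 * per-CPU level 1 cache. init_cache_level() runs before the leaf
 * array is allocated, populate_cache_leaves() after.
 *
 *	int init_cache_level(unsigned int cpu)
 *	{
 *		struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
 *
 *		this_cpu_ci->num_levels = 1;
 *		this_cpu_ci->num_leaves = 1;
 *		return 0;
 *	}
 *
 *	int populate_cache_leaves(unsigned int cpu)
 *	{
 *		struct cacheinfo *leaf = get_cpu_cacheinfo(cpu)->info_list;
 *
 *		leaf->type = CACHE_TYPE_UNIFIED;
 *		leaf->level = 1;
 *		return 0;
 *	}
 */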

static int detect_cache_attributes(unsigned int cpu)
{
	int ret;

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;

	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOMEM;

	ret = populate_cache_leaves(cpu);
	if (ret)
		goto free_ci;
	/*
	 * For systems using DT for cache hierarchy, of_node and shared_cpu_map
	 * will be set up here only if they are not populated already
	 */
	ret = cache_shared_cpu_map_setup(cpu);
	if (ret) {
		pr_warn("Unable to detect cache hierarchy for CPU %d\n", cpu);
		goto free_ci;
	}

	cache_override_properties(cpu);
	return 0;

free_ci:
	free_cache_attributes(cpu);
	return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)	(per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)	(per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)	((per_cpu_index_dev(cpu))[idx])

#define show_one(file_name, object)				\
static ssize_t file_name##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);	\
	return sprintf(buf, "%u\n", this_leaf->object);		\
}

show_one(id, id);
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;

	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return -EINVAL;
	}
}

static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		n = sprintf(buf, "ReadWriteAllocate\n");
	else if (ci_attr & CACHE_READ_ALLOCATE)
		n = sprintf(buf, "ReadAllocate\n");
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		n = sprintf(buf, "WriteAllocate\n");
	return n;
}

static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sprintf(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sprintf(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);
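
/*
 * The read-only attributes above surface per cache leaf under sysfs,
 * e.g. (example values, invented for illustration):
 *
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index1/type
 *	Instruction
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index2/size
 *	512K
 */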

static struct attribute *cache_default_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_id.attr) && (this_leaf->attributes & CACHE_ID))
		return mode;
	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}
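
/*
 * Hotplug callbacks: detect cacheinfo and create its sysfs nodes when
 * a CPU comes online, and tear both down again before it goes offline.
 */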
static int cacheinfo_cpu_online(unsigned int cpu)
{
	int rc = detect_cache_attributes(cpu);

	if (rc)
		return rc;
	rc = cache_add_dev(cpu);
	if (rc)
		free_cache_attributes(cpu);
	return rc;
}

static int cacheinfo_cpu_pre_down(unsigned int cpu)
{
	if (cpumask_test_and_clear_cpu(cpu, &cache_dev_map))
		cpu_cache_sysfs_exit(cpu);

	free_cache_attributes(cpu);
	return 0;
}

static int __init cacheinfo_sysfs_init(void)
{
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "base/cacheinfo:online",
				 cacheinfo_cpu_online, cacheinfo_cpu_pre_down);
}
device_initcall(cacheinfo_sysfs_init);