ACPI: Page based coalescing of I/O remappings optimization
This patch optimizes ACPI MMIO remappings by keeping track of the
remappings on a PAGE_SIZE granularity.
When an ioremap() occurs, the underlying infrastructure works on a 'page'
based granularity. As such, an ioremap() request for 1 byte, for example,
will end up mapping in an entire (PAGE_SIZE) page. Huang Ying took
advantage of this in commit 15651291a2 by checking whether subsequent
ioremap() requests reside within any of the list's existing remappings
and, if so, incrementing a reference count on the existing mapping as
opposed to performing another ioremap().
Signed-off-by: Myron Stowe <myron.stowe@hp.com>
Signed-off-by: Len Brown <len.brown@intel.com>
commit 4a3cba5e72
parent 78cdb3ed40
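Below is a minimal, standalone C sketch of the page-coalescing arithmetic and containment check that the patch adds to acpi_os_map_memory(). The helper names page_round_down() and page_round_up() are hypothetical stand-ins for the kernel's round_down()/round_up() macros; this is an illustration under those assumptions, not the kernel code itself.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL

/* Round an address down/up to the enclosing page boundaries. */
static uint64_t page_round_down(uint64_t addr)
{
	return addr & ~(PAGE_SIZE - 1);
}

static uint64_t page_round_up(uint64_t addr)
{
	return (addr + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
}

int main(void)
{
	/* A 1-byte request at an arbitrary physical address... */
	uint64_t phys = 0xfed40123ULL, size = 1;

	/* ...is widened to, and tracked as, the whole page(s) backing it. */
	uint64_t pg_off = page_round_down(phys);
	uint64_t pg_sz  = page_round_up(phys + size) - pg_off;

	printf("request: phys=%#llx size=%llu\n",
	       (unsigned long long)phys, (unsigned long long)size);
	printf("tracked: pg_off=%#llx pg_sz=%llu\n",
	       (unsigned long long)pg_off, (unsigned long long)pg_sz);

	/* A later request inside the same page satisfies the same containment
	 * test acpi_map_lookup() uses, so it can reuse the existing mapping
	 * instead of triggering another ioremap(). */
	uint64_t phys2 = 0xfed40200ULL, size2 = 4;
	if (pg_off <= phys2 && phys2 + size2 <= pg_off + pg_sz)
		printf("second request reuses the existing mapping\n");
	return 0;
}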
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -104,6 +104,7 @@ struct acpi_ioremap {
 	void __iomem *virt;
 	acpi_physical_address phys;
 	acpi_size size;
+	struct kref ref;
 };
 
 static LIST_HEAD(acpi_ioremaps);
@@ -245,15 +246,28 @@ acpi_physical_address __init acpi_os_get_root_pointer(void)
 }
 
 /* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
-static void __iomem *
-acpi_map_vaddr_lookup(acpi_physical_address phys, acpi_size size)
+static struct acpi_ioremap *
+acpi_map_lookup(acpi_physical_address phys, acpi_size size)
 {
 	struct acpi_ioremap *map;
 
 	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
 		if (map->phys <= phys &&
 		    phys + size <= map->phys + map->size)
-			return map->virt + (phys - map->phys);
+			return map;
+
+	return NULL;
+}
+
+/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
+static void __iomem *
+acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
+{
+	struct acpi_ioremap *map;
+
+	map = acpi_map_lookup(phys, size);
+	if (map)
+		return map->virt + (phys - map->phys);
 
 	return NULL;
 }
@@ -265,7 +279,8 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
 	struct acpi_ioremap *map;
 
 	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
-		if (map->virt == virt && map->size == size)
+		if (map->virt <= virt &&
+		    virt + size <= map->virt + map->size)
 			return map;
 
 	return NULL;
@@ -274,9 +289,10 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
 void __iomem *__init_refok
 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 {
-	struct acpi_ioremap *map;
-	unsigned long flags;
+	struct acpi_ioremap *map, *tmp_map;
+	unsigned long flags, pg_sz;
 	void __iomem *virt;
+	phys_addr_t pg_off;
 
 	if (phys > ULONG_MAX) {
 		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
@@ -290,7 +306,9 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 	if (!map)
 		return NULL;
 
-	virt = ioremap(phys, size);
+	pg_off = round_down(phys, PAGE_SIZE);
+	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
+	virt = ioremap(pg_off, pg_sz);
 	if (!virt) {
 		kfree(map);
 		return NULL;
@@ -298,21 +316,40 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 
 	INIT_LIST_HEAD(&map->list);
 	map->virt = virt;
-	map->phys = phys;
-	map->size = size;
+	map->phys = pg_off;
+	map->size = pg_sz;
+	kref_init(&map->ref);
 
 	spin_lock_irqsave(&acpi_ioremap_lock, flags);
+	/* Check if page has already been mapped. */
+	tmp_map = acpi_map_lookup(phys, size);
+	if (tmp_map) {
+		kref_get(&tmp_map->ref);
+		spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
+		iounmap(map->virt);
+		kfree(map);
+		return tmp_map->virt + (phys - tmp_map->phys);
+	}
 	list_add_tail_rcu(&map->list, &acpi_ioremaps);
 	spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
 
-	return virt;
+	return map->virt + (phys - map->phys);
 }
 EXPORT_SYMBOL_GPL(acpi_os_map_memory);
 
+static void acpi_kref_del_iomap(struct kref *ref)
+{
+	struct acpi_ioremap *map;
+
+	map = container_of(ref, struct acpi_ioremap, ref);
+	list_del_rcu(&map->list);
+}
+
 void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
 {
 	struct acpi_ioremap *map;
 	unsigned long flags;
+	int del;
 
 	if (!acpi_gbl_permanent_mmap) {
 		__acpi_unmap_table(virt, size);
@@ -328,9 +365,12 @@ void __ref acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
 		return;
 	}
 
-	list_del_rcu(&map->list);
+	del = kref_put(&map->ref, acpi_kref_del_iomap);
 	spin_unlock_irqrestore(&acpi_ioremap_lock, flags);
 
+	if (!del)
+		return;
+
 	synchronize_rcu();
 	iounmap(map->virt);
 	kfree(map);
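For completeness, here is a userspace analogue of the reference-count lifecycle the patch introduces: a shared mapping is only torn down once the last user drops its reference. The names iomap_get() and iomap_put() are hypothetical; in the kernel the last put runs acpi_kref_del_iomap(), and the caller then waits for synchronize_rcu() before iounmap()/kfree(), because lookups walk the list under RCU.

#include <stdio.h>
#include <stdlib.h>

struct iomap {
	unsigned long phys;
	unsigned long size;
	int ref;		/* stands in for struct kref */
};

static void iomap_get(struct iomap *m)
{
	m->ref++;
}

/* Returns nonzero when the last reference is dropped, like kref_put(). */
static int iomap_put(struct iomap *m)
{
	if (--m->ref)
		return 0;
	printf("last reference dropped: unmap %#lx+%lu and free\n",
	       m->phys, m->size);
	free(m);
	return 1;
}

int main(void)
{
	struct iomap *m = malloc(sizeof(*m));

	if (!m)
		return 1;
	m->phys = 0xfed40000UL;
	m->size = 4096;
	m->ref = 1;	/* first acpi_os_map_memory() caller */

	iomap_get(m);	/* a second caller maps into the same page */
	iomap_put(m);	/* first caller unmaps: mapping stays alive */
	iomap_put(m);	/* last caller unmaps: real teardown happens */
	return 0;
}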