mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-25 19:31:09 +07:00
7d12e780e0
Maintain a per-CPU global "struct pt_regs *" variable which can be used instead of passing regs around manually through all ~1800 interrupt handlers in the Linux kernel.

The regs pointer is used in few places, but it potentially costs both stack space and code to pass it around. On the FRV arch, removing the regs parameter from all the genirq functions results in a 20% speed up of the IRQ exit path (ie: from leaving timer_interrupt() to leaving do_IRQ()).

Where appropriate, an arch may override the generic storage facility and do something different with the variable. On FRV, for instance, the address is maintained in GR28 at all times inside the kernel as part of general exception handling.

Having looked over the code, it appears that the parameter may be handed down through up to twenty or so layers of functions. Consider a USB character device attached to a USB hub, attached to a USB controller that posts its interrupts through a cascaded auxiliary interrupt controller. A character device driver may want to pass regs to the sysrq handler through the input layer, which adds another few layers of parameter passing.

I've built this code with allyesconfig for x86_64 and i386. I've run-tested the main part of the code on FRV and i386, though I can't test most of the drivers. I've also done partial conversion for powerpc and MIPS - these at least compile with minimal configurations.

This will affect all archs. Mostly the changes should be relatively easy. Take do_IRQ(), store the regs pointer at the beginning, saving the old one:

        struct pt_regs *old_regs = set_irq_regs(regs);

And put the old one back at the end:

        set_irq_regs(old_regs);

Don't pass regs through to generic_handle_irq() or __do_IRQ().

In timer_interrupt(), this sort of change will be necessary:

        -       update_process_times(user_mode(regs));
        -       profile_tick(CPU_PROFILING, regs);
        +       update_process_times(user_mode(get_irq_regs()));
        +       profile_tick(CPU_PROFILING);

I'd like to move update_process_times()'s use of get_irq_regs() into itself, except that i386, alone of the archs, uses something other than user_mode().

Some notes on the interrupt handling in the drivers:

 (*) input_dev() is now gone entirely. The regs pointer is no longer stored in the input_dev struct.

 (*) finish_unlinks() in drivers/usb/host/ohci-q.c needs checking. It does something different depending on whether it's been supplied with a regs pointer or not.

 (*) Various IRQ handler function pointers have been moved to type irq_handler_t.

Signed-off-by: David Howells <dhowells@redhat.com>
(cherry picked from commit 1b16e7ac850969f38b375e511e3fa2f474a33867)
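For an individual driver the conversion is mostly mechanical: the handler loses its third argument (so its pointer matches the new irq_handler_t type), and a handler that genuinely needs the register state calls get_irq_regs() instead. A minimal sketch of the new shape (the foo_* names are made up for illustration and are not part of the patch):

        /* before: irqreturn_t foo_interrupt(int irq, void *dev_id, struct pt_regs *regs) */
        static irqreturn_t foo_interrupt(int irq, void *dev_id)
        {
                struct foo_device *foo = dev_id;
                struct pt_regs *regs = get_irq_regs();  /* only where regs are really needed */

                foo_handle_event(foo, user_mode(regs));
                return IRQ_HANDLED;
        }

The request_irq() call that registers such a handler is unchanged apart from the handler's type; only the handler signature and any explicit uses of the old regs parameter need touching.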
545 lines · 12 KiB · C
/*
 * resource.c - Contains functions for registering and analyzing resource information
 *
 * based on isapnp.c resource management (c) Jaroslav Kysela <perex@suse.cz>
 * Copyright 2003 Adam Belay <ambx1@neo.rr.com>
 *
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/init.h>

#include <linux/pnp.h>
#include "base.h"

static int pnp_reserve_irq[16] = { [0 ... 15] = -1 };   /* reserve (don't use) some IRQ */
static int pnp_reserve_dma[8] = { [0 ... 7] = -1 };     /* reserve (don't use) some DMA */
static int pnp_reserve_io[16] = { [0 ... 15] = -1 };    /* reserve (don't use) some I/O region */
static int pnp_reserve_mem[16] = { [0 ... 15] = -1 };   /* reserve (don't use) some memory region */

/*
 * option registration
 */

static struct pnp_option * pnp_build_option(int priority)
{
        struct pnp_option *option = pnp_alloc(sizeof(struct pnp_option));

        /* check if pnp_alloc ran out of memory */
        if (!option)
                return NULL;

        option->priority = priority & 0xff;
        /* make sure the priority is valid */
        if (option->priority > PNP_RES_PRIORITY_FUNCTIONAL)
                option->priority = PNP_RES_PRIORITY_INVALID;

        return option;
}

struct pnp_option * pnp_register_independent_option(struct pnp_dev *dev)
{
        struct pnp_option *option;
        if (!dev)
                return NULL;

        option = pnp_build_option(PNP_RES_PRIORITY_PREFERRED);

        /* this should never happen but if it does we'll try to continue */
        if (dev->independent)
                pnp_err("independent resource already registered");
        dev->independent = option;
        return option;
}

struct pnp_option * pnp_register_dependent_option(struct pnp_dev *dev, int priority)
{
        struct pnp_option *option;
        if (!dev)
                return NULL;

        option = pnp_build_option(priority);

        if (dev->dependent) {
                struct pnp_option *parent = dev->dependent;
                while (parent->next)
                        parent = parent->next;
                parent->next = option;
        } else
                dev->dependent = option;
        return option;
}

int pnp_register_irq_resource(struct pnp_option *option, struct pnp_irq *data)
{
        struct pnp_irq *ptr;
        if (!option)
                return -EINVAL;
        if (!data)
                return -EINVAL;

        ptr = option->irq;
        while (ptr && ptr->next)
                ptr = ptr->next;
        if (ptr)
                ptr->next = data;
        else
                option->irq = data;

#ifdef CONFIG_PCI
        {
                int i;

                for (i = 0; i < 16; i++)
                        if (test_bit(i, data->map))
                                pcibios_penalize_isa_irq(i, 0);
        }
#endif
        return 0;
}

int pnp_register_dma_resource(struct pnp_option *option, struct pnp_dma *data)
{
        struct pnp_dma *ptr;
        if (!option)
                return -EINVAL;
        if (!data)
                return -EINVAL;

        ptr = option->dma;
        while (ptr && ptr->next)
                ptr = ptr->next;
        if (ptr)
                ptr->next = data;
        else
                option->dma = data;

        return 0;
}

int pnp_register_port_resource(struct pnp_option *option, struct pnp_port *data)
{
        struct pnp_port *ptr;
        if (!option)
                return -EINVAL;
        if (!data)
                return -EINVAL;

        ptr = option->port;
        while (ptr && ptr->next)
                ptr = ptr->next;
        if (ptr)
                ptr->next = data;
        else
                option->port = data;

        return 0;
}

int pnp_register_mem_resource(struct pnp_option *option, struct pnp_mem *data)
{
        struct pnp_mem *ptr;
        if (!option)
                return -EINVAL;
        if (!data)
                return -EINVAL;

        ptr = option->mem;
        while (ptr && ptr->next)
                ptr = ptr->next;
        if (ptr)
                ptr->next = data;
        else
                option->mem = data;
        return 0;
}

static void pnp_free_port(struct pnp_port *port)
{
        struct pnp_port *next;

        while (port) {
                next = port->next;
                kfree(port);
                port = next;
        }
}

static void pnp_free_irq(struct pnp_irq *irq)
{
        struct pnp_irq *next;

        while (irq) {
                next = irq->next;
                kfree(irq);
                irq = next;
        }
}

static void pnp_free_dma(struct pnp_dma *dma)
{
        struct pnp_dma *next;

        while (dma) {
                next = dma->next;
                kfree(dma);
                dma = next;
        }
}

static void pnp_free_mem(struct pnp_mem *mem)
{
        struct pnp_mem *next;

        while (mem) {
                next = mem->next;
                kfree(mem);
                mem = next;
        }
}

void pnp_free_option(struct pnp_option *option)
{
        struct pnp_option *next;

        while (option) {
                next = option->next;
                pnp_free_port(option->port);
                pnp_free_irq(option->irq);
                pnp_free_dma(option->dma);
                pnp_free_mem(option->mem);
                kfree(option);
                option = next;
        }
}

/*
 * resource validity checking
 */

#define length(start, end) (*(end) - *(start) + 1)

/* Two ranges conflict if one doesn't end before the other starts */
#define ranged_conflict(starta, enda, startb, endb) \
        !((*(enda) < *(startb)) || (*(endb) < *(starta)))

#define cannot_compare(flags) \
        ((flags) & (IORESOURCE_UNSET | IORESOURCE_DISABLED))
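
/*
 * Illustration (values are made up): a candidate I/O range 0x220-0x22f and
 * an already-claimed range 0x228-0x22f overlap because neither ends before
 * the other starts, so ranged_conflict() evaluates to true and the checks
 * below report a conflict.  cannot_compare() flags resources whose
 * start/end fields are meaningless (unset or disabled), so those are
 * skipped entirely.
 */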

int pnp_check_port(struct pnp_dev * dev, int idx)
{
        int tmp;
        struct pnp_dev *tdev;
        resource_size_t *port, *end, *tport, *tend;
        port = &dev->res.port_resource[idx].start;
        end = &dev->res.port_resource[idx].end;

        /* if the resource doesn't exist, don't complain about it */
        if (cannot_compare(dev->res.port_resource[idx].flags))
                return 1;

        /* check if the resource is already in use, skip if the
         * device is active because it itself may be in use */
        if(!dev->active) {
                if (__check_region(&ioport_resource, *port, length(port,end)))
                        return 0;
        }

        /* check if the resource is reserved */
        for (tmp = 0; tmp < 8; tmp++) {
                int rport = pnp_reserve_io[tmp << 1];
                int rend = pnp_reserve_io[(tmp << 1) + 1] + rport - 1;
                if (ranged_conflict(port,end,&rport,&rend))
                        return 0;
        }

        /* check for internal conflicts */
        for (tmp = 0; tmp < PNP_MAX_PORT && tmp != idx; tmp++) {
                if (dev->res.port_resource[tmp].flags & IORESOURCE_IO) {
                        tport = &dev->res.port_resource[tmp].start;
                        tend = &dev->res.port_resource[tmp].end;
                        if (ranged_conflict(port,end,tport,tend))
                                return 0;
                }
        }

        /* check for conflicts with other pnp devices */
        pnp_for_each_dev(tdev) {
                if (tdev == dev)
                        continue;
                for (tmp = 0; tmp < PNP_MAX_PORT; tmp++) {
                        if (tdev->res.port_resource[tmp].flags & IORESOURCE_IO) {
                                if (cannot_compare(tdev->res.port_resource[tmp].flags))
                                        continue;
                                tport = &tdev->res.port_resource[tmp].start;
                                tend = &tdev->res.port_resource[tmp].end;
                                if (ranged_conflict(port,end,tport,tend))
                                        return 0;
                        }
                }
        }

        return 1;
}

int pnp_check_mem(struct pnp_dev * dev, int idx)
{
        int tmp;
        struct pnp_dev *tdev;
        resource_size_t *addr, *end, *taddr, *tend;
        addr = &dev->res.mem_resource[idx].start;
        end = &dev->res.mem_resource[idx].end;

        /* if the resource doesn't exist, don't complain about it */
        if (cannot_compare(dev->res.mem_resource[idx].flags))
                return 1;

        /* check if the resource is already in use, skip if the
         * device is active because it itself may be in use */
        if(!dev->active) {
                if (check_mem_region(*addr, length(addr,end)))
                        return 0;
        }

        /* check if the resource is reserved */
        for (tmp = 0; tmp < 8; tmp++) {
                int raddr = pnp_reserve_mem[tmp << 1];
                int rend = pnp_reserve_mem[(tmp << 1) + 1] + raddr - 1;
                if (ranged_conflict(addr,end,&raddr,&rend))
                        return 0;
        }

        /* check for internal conflicts */
        for (tmp = 0; tmp < PNP_MAX_MEM && tmp != idx; tmp++) {
                if (dev->res.mem_resource[tmp].flags & IORESOURCE_MEM) {
                        taddr = &dev->res.mem_resource[tmp].start;
                        tend = &dev->res.mem_resource[tmp].end;
                        if (ranged_conflict(addr,end,taddr,tend))
                                return 0;
                }
        }

        /* check for conflicts with other pnp devices */
        pnp_for_each_dev(tdev) {
                if (tdev == dev)
                        continue;
                for (tmp = 0; tmp < PNP_MAX_MEM; tmp++) {
                        if (tdev->res.mem_resource[tmp].flags & IORESOURCE_MEM) {
                                if (cannot_compare(tdev->res.mem_resource[tmp].flags))
                                        continue;
                                taddr = &tdev->res.mem_resource[tmp].start;
                                tend = &tdev->res.mem_resource[tmp].end;
                                if (ranged_conflict(addr,end,taddr,tend))
                                        return 0;
                        }
                }
        }

        return 1;
}

/* dummy handler used by pnp_check_irq() to probe whether an IRQ line is free */
static irqreturn_t pnp_test_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

int pnp_check_irq(struct pnp_dev * dev, int idx)
{
        int tmp;
        struct pnp_dev *tdev;
        resource_size_t * irq = &dev->res.irq_resource[idx].start;

        /* if the resource doesn't exist, don't complain about it */
        if (cannot_compare(dev->res.irq_resource[idx].flags))
                return 1;

        /* check if the resource is valid */
        if (*irq < 0 || *irq > 15)
                return 0;

        /* check if the resource is reserved */
        for (tmp = 0; tmp < 16; tmp++) {
                if (pnp_reserve_irq[tmp] == *irq)
                        return 0;
        }

        /* check for internal conflicts */
        for (tmp = 0; tmp < PNP_MAX_IRQ && tmp != idx; tmp++) {
                if (dev->res.irq_resource[tmp].flags & IORESOURCE_IRQ) {
                        if (dev->res.irq_resource[tmp].start == *irq)
                                return 0;
                }
        }

#ifdef CONFIG_PCI
        /* check if the resource is being used by a pci device */
        {
                struct pci_dev *pci = NULL;
                for_each_pci_dev(pci) {
                        if (pci->irq == *irq)
                                return 0;
                }
        }
#endif

        /* check if the resource is already in use, skip if the
         * device is active because it itself may be in use */
        if(!dev->active) {
                if (request_irq(*irq, pnp_test_handler,
                                IRQF_DISABLED|IRQF_PROBE_SHARED, "pnp", NULL))
                        return 0;
                free_irq(*irq, NULL);
        }

        /* check for conflicts with other pnp devices */
        pnp_for_each_dev(tdev) {
                if (tdev == dev)
                        continue;
                for (tmp = 0; tmp < PNP_MAX_IRQ; tmp++) {
                        if (tdev->res.irq_resource[tmp].flags & IORESOURCE_IRQ) {
                                if (cannot_compare(tdev->res.irq_resource[tmp].flags))
                                        continue;
                                if ((tdev->res.irq_resource[tmp].start == *irq))
                                        return 0;
                        }
                }
        }

        return 1;
}

int pnp_check_dma(struct pnp_dev * dev, int idx)
{
#ifndef CONFIG_IA64
        int tmp;
        struct pnp_dev *tdev;
        resource_size_t * dma = &dev->res.dma_resource[idx].start;

        /* if the resource doesn't exist, don't complain about it */
        if (cannot_compare(dev->res.dma_resource[idx].flags))
                return 1;

        /* check if the resource is valid */
        if (*dma < 0 || *dma == 4 || *dma > 7)
                return 0;

        /* check if the resource is reserved */
        for (tmp = 0; tmp < 8; tmp++) {
                if (pnp_reserve_dma[tmp] == *dma)
                        return 0;
        }

        /* check for internal conflicts */
        for (tmp = 0; tmp < PNP_MAX_DMA && tmp != idx; tmp++) {
                if (dev->res.dma_resource[tmp].flags & IORESOURCE_DMA) {
                        if (dev->res.dma_resource[tmp].start == *dma)
                                return 0;
                }
        }

        /* check if the resource is already in use, skip if the
         * device is active because it itself may be in use */
        if(!dev->active) {
                if (request_dma(*dma, "pnp"))
                        return 0;
                free_dma(*dma);
        }

        /* check for conflicts with other pnp devices */
        pnp_for_each_dev(tdev) {
                if (tdev == dev)
                        continue;
                for (tmp = 0; tmp < PNP_MAX_DMA; tmp++) {
                        if (tdev->res.dma_resource[tmp].flags & IORESOURCE_DMA) {
                                if (cannot_compare(tdev->res.dma_resource[tmp].flags))
                                        continue;
                                if ((tdev->res.dma_resource[tmp].start == *dma))
                                        return 0;
                        }
                }
        }

        return 1;
#else
        /* IA64 has no legacy DMA */
        return 0;
#endif
}

#if 0
EXPORT_SYMBOL(pnp_register_dependent_option);
EXPORT_SYMBOL(pnp_register_independent_option);
EXPORT_SYMBOL(pnp_register_irq_resource);
EXPORT_SYMBOL(pnp_register_dma_resource);
EXPORT_SYMBOL(pnp_register_port_resource);
EXPORT_SYMBOL(pnp_register_mem_resource);
#endif /* 0 */

/* format is: pnp_reserve_irq=irq1[,irq2] .... */
static int __init pnp_setup_reserve_irq(char *str)
{
        int i;

        for (i = 0; i < 16; i++)
                if (get_option(&str,&pnp_reserve_irq[i]) != 2)
                        break;
        return 1;
}

__setup("pnp_reserve_irq=", pnp_setup_reserve_irq);

/* format is: pnp_reserve_dma=dma1[,dma2] .... */
static int __init pnp_setup_reserve_dma(char *str)
{
        int i;

        for (i = 0; i < 8; i++)
                if (get_option(&str,&pnp_reserve_dma[i]) != 2)
                        break;
        return 1;
}

__setup("pnp_reserve_dma=", pnp_setup_reserve_dma);

/* format is: pnp_reserve_io=io1,size1[,io2,size2] .... */
static int __init pnp_setup_reserve_io(char *str)
{
        int i;

        for (i = 0; i < 16; i++)
                if (get_option(&str,&pnp_reserve_io[i]) != 2)
                        break;
        return 1;
}

__setup("pnp_reserve_io=", pnp_setup_reserve_io);

/* format is: pnp_reserve_mem=mem1,size1[,mem2,size2] .... */
static int __init pnp_setup_reserve_mem(char *str)
{
        int i;

        for (i = 0; i < 16; i++)
                if (get_option(&str,&pnp_reserve_mem[i]) != 2)
                        break;
        return 1;
}
__setup("pnp_reserve_mem=", pnp_setup_reserve_mem);