Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-03 04:36:51 +07:00)
ARC Fixes
commit 83ad283f6b

Merge tag 'arc-fixes-for-4.4-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC architecture fixes from Vineet Gupta:
 "Fixes for:
  - perf interrupts on SMP: Not enabled (at boot) and disabled (at runtime)
  - stack unwinder regression (for modules, ignoring dwarf3)
  - nsim hosed for non default kernel link base builds"

* tag 'arc-fixes-for-4.4-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: smp: Rename platform hook @init_cpu_smp -> @init_per_cpu
  ARC: rename smp operation init_irq_cpu() to init_per_cpu()
  ARC: dw2 unwind: Ignore CIE version !=1 gracefully instead of bailing
  ARC: dw2 unwind: Reinstate unwinding out of modules
  ARC: [plat-sim] unbork non default CONFIG_LINUX_LINK_BASE
  ARC: intc: Document arc_request_percpu_irq() better
  ARCv2: perf: Ensure perf intr gets enabled on all cores
  ARC: intc: No need to clear IRQ_NOAUTOEN
  ARCv2: intc: Fix random perf irq disabling in SMP setup
  ARC: [axs10x] cap ethernet phy to 100 Mbit/sec
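Background for the percpu IRQ changes below: genirq expects a per-cpu interrupt to be marked percpu_devid, requested once, and then enabled locally on every core, because percpu_devid IRQs come up with NOAUTOEN set. A minimal sketch of that pattern (the demo_* names are made up for illustration, not taken from this series):

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, demo_dev);          /* hypothetical per-cpu token */

static irqreturn_t demo_isr(int irq, void *dev)
{
        return IRQ_HANDLED;
}

/* once, before any core enables the IRQ: register the handler globally */
static int demo_request(int irq)
{
        return request_percpu_irq(irq, demo_isr, "demo-percpu", &demo_dev);
}

/* on every core (including the one that did the request): enable locally */
static void demo_enable_local(int irq)
{
        enable_percpu_irq(irq, IRQ_TYPE_NONE);
}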
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -445,6 +445,7 @@ config LINUX_LINK_BASE
           However some customers have peripherals mapped at this addr, so
           Linux needs to be scooted a bit.
           If you don't know what the above means, leave this setting alone.
+          This needs to match memory start address specified in Device Tree
 
 config HIGHMEM
         bool "High Memory Support"
--- a/arch/arc/boot/dts/axs10x_mb.dtsi
+++ b/arch/arc/boot/dts/axs10x_mb.dtsi
@@ -46,6 +46,7 @@ ethernet@0x18000 {
                         snps,pbl = < 32 >;
                         clocks = <&apbclk>;
                         clock-names = "stmmaceth";
+                        max-speed = <100>;
                 };
 
                 ehci@0x40000 {
--- a/arch/arc/boot/dts/nsim_hs.dts
+++ b/arch/arc/boot/dts/nsim_hs.dts
@@ -17,7 +17,8 @@ / {
 
         memory {
                 device_type = "memory";
-                reg = <0x0 0x80000000 0x0 0x40000000    /* 1 GB low mem */
+                /* CONFIG_LINUX_LINK_BASE needs to match low mem start */
+                reg = <0x0 0x80000000 0x0 0x20000000    /* 512 MB low mem */
                        0x1 0x00000000 0x0 0x40000000>;  /* 1 GB highmem */
         };
 
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -23,7 +23,7 @@
  * @dt_compat:		Array of device tree 'compatible' strings
  *			(XXX: although only 1st entry is looked at)
  * @init_early:		Very early callback [called from setup_arch()]
- * @init_cpu_smp:	for each CPU as it is coming up (SMP as well as UP)
+ * @init_per_cpu:	for each CPU as it is coming up (SMP as well as UP)
  *			[(M):init_IRQ(), (o):start_kernel_secondary()]
  * @init_machine:	arch initcall level callback (e.g. populate static
  *			platform devices or parse Devicetree)
@@ -35,7 +35,7 @@ struct machine_desc {
         const char              **dt_compat;
         void                    (*init_early)(void);
 #ifdef CONFIG_SMP
-        void                    (*init_cpu_smp)(unsigned int);
+        void                    (*init_per_cpu)(unsigned int);
 #endif
         void                    (*init_machine)(void);
         void                    (*init_late)(void);
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -48,7 +48,7 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
  * @init_early_smp:	A SMP specific h/w block can init itself
  *			Could be common across platforms so not covered by
  *			mach_desc->init_early()
- * @init_irq_cpu:	Called for each core so SMP h/w block driver can do
+ * @init_per_cpu:	Called for each core so SMP h/w block driver can do
  *			any needed setup per cpu (e.g. IPI request)
  * @cpu_kick:		For Master to kickstart a cpu (optionally at a PC)
  * @ipi_send:		To send IPI to a @cpu
@@ -57,7 +57,7 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
 struct plat_smp_ops {
         const char      *info;
         void            (*init_early_smp)(void);
-        void            (*init_irq_cpu)(int cpu);
+        void            (*init_per_cpu)(int cpu);
         void            (*cpu_kick)(int cpu, unsigned long pc);
         void            (*ipi_send)(int cpu);
         void            (*ipi_clear)(int irq);
--- a/arch/arc/include/asm/unwind.h
+++ b/arch/arc/include/asm/unwind.h
@@ -112,7 +112,6 @@ struct unwind_frame_info {
 
 extern int arc_unwind(struct unwind_frame_info *frame);
 extern void arc_unwind_init(void);
-extern void arc_unwind_setup(void);
 extern void *unwind_add_table(struct module *module, const void *table_start,
                               unsigned long table_size);
 extern void unwind_remove_table(void *handle, int init_only);
@@ -152,9 +151,6 @@ static inline void arc_unwind_init(void)
 {
 }
 
-static inline void arc_unwind_setup(void)
-{
-}
 #define unwind_add_table(a, b, c)
 #define unwind_remove_table(a, b)
 
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -106,10 +106,21 @@ static struct irq_chip arcv2_irq_chip = {
 static int arcv2_irq_map(struct irq_domain *d, unsigned int irq,
                          irq_hw_number_t hw)
 {
-        if (irq == TIMER0_IRQ || irq == IPI_IRQ)
+        /*
+         * core intc IRQs [16, 23]:
+         * Statically assigned always private-per-core (Timers, WDT, IPI, PCT)
+         */
+        if (hw < 24) {
+                /*
+                 * A subsequent request_percpu_irq() fails if percpu_devid is
+                 * not set. That in turn sets NOAUTOEN, meaning each core needs
+                 * to call enable_percpu_irq()
+                 */
+                irq_set_percpu_devid(irq);
                 irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq);
-        else
+        } else {
                 irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq);
+        }
 
         return 0;
 }
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -29,11 +29,11 @@ void __init init_IRQ(void)
 
 #ifdef CONFIG_SMP
         /* a SMP H/w block could do IPI IRQ request here */
-        if (plat_smp_ops.init_irq_cpu)
-                plat_smp_ops.init_irq_cpu(smp_processor_id());
+        if (plat_smp_ops.init_per_cpu)
+                plat_smp_ops.init_per_cpu(smp_processor_id());
 
-        if (machine_desc->init_cpu_smp)
-                machine_desc->init_cpu_smp(smp_processor_id());
+        if (machine_desc->init_per_cpu)
+                machine_desc->init_per_cpu(smp_processor_id());
 #endif
 }
 
@@ -51,6 +51,18 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
         set_irq_regs(old_regs);
 }
 
+/*
+ * API called for requesting percpu interrupts - called by each CPU
+ *  - For boot CPU, actually request the IRQ with genirq core + enables
+ *  - For subsequent callers only enable called locally
+ *
+ * Relies on being called by boot cpu first (i.e. request called ahead) of
+ * any enable as expected by genirq. Hence suitable only for TIMER, IPI
+ * which are guaranteed to be setup on boot core first.
+ * Late probed peripherals such as perf can't use this as there is no
+ * guarantee of being called on boot CPU first.
+ */
+
 void arc_request_percpu_irq(int irq, int cpu,
                             irqreturn_t (*isr)(int irq, void *dev),
                             const char *irq_nm,
@@ -60,14 +72,17 @@ void arc_request_percpu_irq(int irq, int cpu,
         if (!cpu) {
                 int rc;
 
+#ifdef CONFIG_ISA_ARCOMPACT
                 /*
-                 * These 2 calls are essential to making percpu IRQ APIs work
-                 * Ideally these details could be hidden in irq chip map function
-                 * but the issue is IPIs IRQs being static (non-DT) and platform
-                 * specific, so we can't identify them there.
+                 * A subsequent request_percpu_irq() fails if percpu_devid is
+                 * not set. That in turn sets NOAUTOEN, meaning each core needs
+                 * to call enable_percpu_irq()
+                 *
+                 * For ARCv2, this is done in irq map function since we know
+                 * which irqs are strictly per cpu
                  */
                 irq_set_percpu_devid(irq);
-                irq_modify_status(irq, IRQ_NOAUTOEN, 0);        /* @irq, @clr, @set */
+#endif
 
                 rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
                 if (rc)
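The comment block added above encodes an ordering contract worth spelling out. A sketch of the two cases, as comments only (grounded in the documented behaviour, not new code from this series):

/*
 * TIMER/IPI (safe): the boot CPU always runs first
 *   cpu0: arc_request_percpu_irq() -> request_percpu_irq() + local enable
 *   cpuN: arc_request_percpu_irq() -> local enable only
 *
 * Late-probed device such as perf (unsafe): no ordering guarantee, so a
 * secondary core may reach enable_percpu_irq() before the boot core has
 * done request_percpu_irq(), which genirq rejects. Hence the on_each_cpu()
 * approach in the perf hunks further down.
 */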
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -132,7 +132,7 @@ static void mcip_probe_n_setup(void)
 struct plat_smp_ops plat_smp_ops = {
         .info           = smp_cpuinfo_buf,
         .init_early_smp = mcip_probe_n_setup,
-        .init_irq_cpu   = mcip_setup_per_cpu,
+        .init_per_cpu   = mcip_setup_per_cpu,
         .ipi_send       = mcip_ipi_send,
         .ipi_clear      = mcip_ipi_clear,
 };
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -428,12 +428,11 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
 
 #endif /* CONFIG_ISA_ARCV2 */
 
-void arc_cpu_pmu_irq_init(void)
+static void arc_cpu_pmu_irq_init(void *data)
 {
-        struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
+        int irq = *(int *)data;
 
-        arc_request_percpu_irq(arc_pmu->irq, smp_processor_id(), arc_pmu_intr,
-                               "ARC perf counters", pmu_cpu);
+        enable_percpu_irq(irq, IRQ_TYPE_NONE);
 
         /* Clear all pending interrupt flags */
         write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
@@ -515,7 +514,6 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 
         if (has_interrupts) {
                 int irq = platform_get_irq(pdev, 0);
-                unsigned long flags;
 
                 if (irq < 0) {
                         pr_err("Cannot get IRQ number for the platform\n");
@@ -524,24 +522,12 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 
                 arc_pmu->irq = irq;
 
-                /*
-                 * arc_cpu_pmu_irq_init() needs to be called on all cores for
-                 * their respective local PMU.
-                 * However we use opencoded on_each_cpu() to ensure it is called
-                 * on core0 first, so that arc_request_percpu_irq() sets up
-                 * AUTOEN etc. Otherwise enable_percpu_irq() fails to enable
-                 * perf IRQ on non master cores.
-                 * see arc_request_percpu_irq()
-                 */
-                preempt_disable();
-                local_irq_save(flags);
-                arc_cpu_pmu_irq_init();
-                local_irq_restore(flags);
-                smp_call_function((smp_call_func_t)arc_cpu_pmu_irq_init, 0, 1);
-                preempt_enable();
+                /* intc map function ensures irq_set_percpu_devid() called */
+                request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
+                                   this_cpu_ptr(&arc_pmu_cpu));
 
-                /* Clean all pending interrupt flags */
-                write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
+                on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
 
         } else
                 arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
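With percpu_devid now set in the intc map function, a late-probed driver can follow the shape used above: one global request, then a broadcast local enable. A self-contained sketch of that pattern (the my_* names are placeholders, not from this diff):

#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>

struct my_pmu_cpu { int scratch; };             /* stand-in per-cpu state */
static DEFINE_PER_CPU(struct my_pmu_cpu, my_pmu_cpu);

static irqreturn_t my_isr(int irq, void *dev)
{
        return IRQ_HANDLED;
}

static void my_irq_enable_local(void *data)
{
        int irq = *(int *)data;

        enable_percpu_irq(irq, IRQ_TYPE_NONE);  /* runs on each core in turn */
}

static int my_probe(int irq)
{
        /* single request; the irqchip map hook already set percpu_devid */
        int err = request_percpu_irq(irq, my_isr, "my-pmu", &my_pmu_cpu);

        if (err)
                return err;

        /* run the local enable on every online CPU and wait for completion */
        on_each_cpu(my_irq_enable_local, &irq, 1);
        return 0;
}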
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -429,7 +429,6 @@ void __init setup_arch(char **cmdline_p)
 #endif
 
         arc_unwind_init();
-        arc_unwind_setup();
 }
 
 static int __init customize_machine(void)
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -132,11 +132,11 @@ void start_kernel_secondary(void)
         pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
 
         /* Some SMP H/w setup - for each cpu */
-        if (plat_smp_ops.init_irq_cpu)
-                plat_smp_ops.init_irq_cpu(cpu);
+        if (plat_smp_ops.init_per_cpu)
+                plat_smp_ops.init_per_cpu(cpu);
 
-        if (machine_desc->init_cpu_smp)
-                machine_desc->init_cpu_smp(cpu);
+        if (machine_desc->init_per_cpu)
+                machine_desc->init_per_cpu(cpu);
 
         arc_local_timer_setup();
 
--- a/arch/arc/kernel/unwind.c
+++ b/arch/arc/kernel/unwind.c
@@ -170,6 +170,23 @@ static struct unwind_table *find_table(unsigned long pc)
 
 static unsigned long read_pointer(const u8 **pLoc,
                                   const void *end, signed ptrType);
+static void init_unwind_hdr(struct unwind_table *table,
+                            void *(*alloc) (unsigned long));
+
+/*
+ * wrappers for header alloc (vs. calling one vs. other at call site)
+ * to elide section mismatches warnings
+ */
+static void *__init unw_hdr_alloc_early(unsigned long sz)
+{
+        return __alloc_bootmem_nopanic(sz, sizeof(unsigned int),
+                                       MAX_DMA_ADDRESS);
+}
+
+static void *unw_hdr_alloc(unsigned long sz)
+{
+        return kmalloc(sz, GFP_KERNEL);
+}
 
 static void init_unwind_table(struct unwind_table *table, const char *name,
                               const void *core_start, unsigned long core_size,
@@ -209,6 +226,8 @@ void __init arc_unwind_init(void)
                           __start_unwind, __end_unwind - __start_unwind,
                           NULL, 0);
           /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/
+
+        init_unwind_hdr(&root_table, unw_hdr_alloc_early);
 }
 
 static const u32 bad_cie, not_fde;
@@ -241,8 +260,8 @@ static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
         e2->fde = v;
 }
 
-static void __init setup_unwind_table(struct unwind_table *table,
-                                      void *(*alloc) (unsigned long))
+static void init_unwind_hdr(struct unwind_table *table,
+                            void *(*alloc) (unsigned long))
 {
         const u8 *ptr;
         unsigned long tableSize = table->size, hdrSize;
@@ -274,13 +293,13 @@ static void __init setup_unwind_table(struct unwind_table *table,
                 const u32 *cie = cie_for_fde(fde, table);
                 signed ptrType;
 
-                if (cie == &not_fde)
+                if (cie == &not_fde)    /* only process FDE here */
                         continue;
                 if (cie == NULL || cie == &bad_cie)
-                        return;
+                        continue;       /* say FDE->CIE.version != 1 */
                 ptrType = fde_pointer_type(cie);
                 if (ptrType < 0)
-                        return;
+                        continue;
 
                 ptr = (const u8 *)(fde + 2);
                 if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
@@ -300,9 +319,11 @@ static void __init setup_unwind_table(struct unwind_table *table,
 
         hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
                 + 2 * n * sizeof(unsigned long);
+
         header = alloc(hdrSize);
         if (!header)
                 return;
+
         header->version = 1;
         header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
         header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4;
@@ -322,6 +343,10 @@ static void __init setup_unwind_table(struct unwind_table *table,
 
                 if (fde[1] == 0xffffffff)
                         continue;       /* this is a CIE */
+
+                if (*(u8 *)(cie + 2) != 1)
+                        continue;       /* FDE->CIE.version not supported */
+
                 ptr = (const u8 *)(fde + 2);
                 header->table[n].start = read_pointer(&ptr,
                                                       (const u8 *)(fde + 1) +
@@ -342,18 +367,6 @@ static void __init setup_unwind_table(struct unwind_table *table,
         table->header = (const void *)header;
 }
 
-static void *__init balloc(unsigned long sz)
-{
-        return __alloc_bootmem_nopanic(sz,
-                                       sizeof(unsigned int),
-                                       __pa(MAX_DMA_ADDRESS));
-}
-
-void __init arc_unwind_setup(void)
-{
-        setup_unwind_table(&root_table, balloc);
-}
-
 #ifdef CONFIG_MODULES
 
 static struct unwind_table *last_table;
@@ -377,6 +390,8 @@ void *unwind_add_table(struct module *module, const void *table_start,
                           table_start, table_size,
                           NULL, 0);
 
+        init_unwind_hdr(table, unw_hdr_alloc);
+
 #ifdef UNWIND_DEBUG
         unw_debug("Table added for [%s] %lx %lx\n",
                   module->name, table->core.pc, table->core.range);
@@ -439,6 +454,7 @@ void unwind_remove_table(void *handle, int init_only)
         info.init_only = init_only;
 
         unlink_table(&info); /* XXX: SMP */
+        kfree(table->header);
         kfree(table);
 }
 
@@ -507,7 +523,8 @@ static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
 
         if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde)
             || (*cie & (sizeof(*cie) - 1))
-            || (cie[1] != 0xffffffff))
+            || (cie[1] != 0xffffffff)
+            || ( *(u8 *)(cie + 2) != 1))        /* version 1 supported */
                 return NULL;    /* this is not a (valid) CIE */
         return cie;
 }
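For reference, the entry layout these checks read, sketched as a helper (the helper and its names are mine, not part of the patch): a table entry starts with a 4-byte length word, then a 4-byte ID word; in these tables 0xffffffff marks a CIE, and the CIE version byte immediately follows the ID. As the commit log notes, dwarf3-generated CIEs carry a version other than 1, which previously made the unwinder bail instead of skipping the entry.

#include <linux/types.h>

/* classify an unwind-table entry the way cie_for_fde() above does */
static bool is_supported_cie(const u32 *entry)
{
        u32 id = entry[1];                      /* 0xffffffff -> CIE, else FDE */
        u8 version = *(const u8 *)(entry + 2);  /* byte right after the ID word */

        return id == 0xffffffff && version == 1;        /* only version 1 handled */
}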
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -51,7 +51,9 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
         int in_use = 0;
 
         if (!low_mem_sz) {
-                BUG_ON(base != low_mem_start);
+                if (base != low_mem_start)
+                        panic("CONFIG_LINUX_LINK_BASE != DT memory { }");
+
                 low_mem_sz = size;
                 in_use = 1;
         } else {