mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-11-24 15:01:13 +07:00
Merge branch 'parisc-5.7-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux
Pull parisc updates from Helge Deller:
 "Some cleanups in arch_rw locking functions, improved interrupt
  handling in arch spinlocks, conversions to request_irq() and
  syscall table generation cleanups"

* 'parisc-5.7-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: remove nargs from __SYSCALL
  parisc: Refactor alternative code to accept multiple conditions
  parisc: Rework arch_rw locking functions
  parisc: Improve interrupt handling in arch_spin_lock_flags()
  parisc: Replace setup_irq() by request_irq()
This commit is contained in: commit f9db97d751
@@ -10,25 +10,34 @@
 static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
 	volatile unsigned int *a = __ldcw_align(x);
 	smp_mb();
 	return *a == 0;
 }
 
-#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
-
-static inline void arch_spin_lock_flags(arch_spinlock_t *x,
-					unsigned long flags)
+static inline void arch_spin_lock(arch_spinlock_t *x)
 {
 	volatile unsigned int *a;
 
 	a = __ldcw_align(x);
 	while (__ldcw(a) == 0)
 		while (*a == 0)
-			if (flags & PSW_SM_I) {
-				local_irq_enable();
-				cpu_relax();
-				local_irq_disable();
-			} else
-				cpu_relax();
+			cpu_relax();
+}
+
+static inline void arch_spin_lock_flags(arch_spinlock_t *x,
+					unsigned long flags)
+{
+	volatile unsigned int *a;
+	unsigned long flags_dis;
+
+	a = __ldcw_align(x);
+	while (__ldcw(a) == 0) {
+		local_save_flags(flags_dis);
+		local_irq_restore(flags);
+		while (*a == 0)
+			cpu_relax();
+		local_irq_restore(flags_dis);
+	}
 }
 #define arch_spin_lock_flags arch_spin_lock_flags
 
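The reworked arch_spin_lock_flags() only touches the interrupt state while it
is passively watching the lock word; the actual __ldcw() acquire always runs
with interrupts disabled. A minimal userspace model of that pattern, with C11
atomics standing in for PA-RISC's ldcw and a boolean for the PSW interrupt
bit (all names here are illustrative; this is a sketch, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int lock_word = 1;	/* ldcw convention: 1 = free, 0 = held */
static bool irqs_enabled;		/* stands in for PSW_SM_I */

static void irq_restore(bool flags)	{ irqs_enabled = flags; }
static bool save_flags(void)		{ return irqs_enabled; }

static void spin_lock_flags(bool caller_flags)
{
	/* exchange returns the previous value: 0 means someone holds it */
	while (atomic_exchange(&lock_word, 0) == 0) {
		bool flags_dis = save_flags();
		/* let interrupts back in (per the caller's saved flags)
		 * while we only read the lock word */
		irq_restore(caller_flags);
		while (atomic_load(&lock_word) == 0)
			;			/* cpu_relax() */
		/* disable again before retrying the atomic acquire */
		irq_restore(flags_dis);
	}
}

int main(void)
{
	bool flags = true;	/* caller had interrupts enabled... */
	irqs_enabled = false;	/* ...then did local_irq_save()    */
	spin_lock_flags(flags);	/* uncontended: taken on first try */
	return 0;
}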
@@ -58,116 +67,93 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
 
 /*
  * Read-write spinlocks, allowing multiple readers but only one writer.
- * Linux rwlocks are unfair to writers; they can be starved for an indefinite
- * time by readers.  With care, they can also be taken in interrupt context.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
  *
- * In the PA-RISC implementation, we have a spinlock and a counter.
- * Readers use the lock to serialise their access to the counter (which
- * records how many readers currently hold the lock).
- * Writers hold the spinlock, preventing any readers or other writers from
- * grabbing the rwlock.
+ * The spinlock itself is contained in @counter and access to it is
+ * serialized with @lock_mutex.
  */
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void arch_read_lock(arch_rwlock_t *rw)
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
+	int ret = 0;
 	unsigned long flags;
+
 	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
-	rw->counter++;
-	arch_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
-}
+	arch_spin_lock(&(rw->lock_mutex));
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned long flags;
-	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
-	rw->counter--;
-	arch_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
-}
+	/*
+	 * zero means writer holds the lock exclusively, deny Reader.
+	 * Otherwise grant lock to first/subseq reader
+	 */
+	if (rw->counter > 0) {
+		rw->counter--;
+		ret = 1;
+	}
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned long flags;
- retry:
-	local_irq_save(flags);
-	if (arch_spin_trylock(&rw->lock)) {
-		rw->counter++;
-		arch_spin_unlock(&rw->lock);
-		local_irq_restore(flags);
-		return 1;
-	}
+	arch_spin_unlock(&(rw->lock_mutex));
+	local_irq_restore(flags);
 
-	local_irq_restore(flags);
-	/* If write-locked, we fail to acquire the lock */
-	if (rw->counter < 0)
-		return 0;
+	return ret;
+}
 
-	/* Wait until we have a realistic chance at the lock */
-	while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
-		cpu_relax();
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
+{
+	int ret = 0;
+	unsigned long flags;
 
-	goto retry;
+	local_irq_save(flags);
+	arch_spin_lock(&(rw->lock_mutex));
+
+	/*
+	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+	 * deny writer. Otherwise if unlocked grant to writer
+	 * Hence the claim that Linux rwlocks are unfair to writers.
+	 * (can be starved for an indefinite time by readers).
+	 */
+	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+		rw->counter = 0;
+		ret = 1;
+	}
+	arch_spin_unlock(&(rw->lock_mutex));
+	local_irq_restore(flags);
+
+	return ret;
 }
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void arch_write_lock(arch_rwlock_t *rw)
+static inline void arch_read_lock(arch_rwlock_t *rw)
 {
-	unsigned long flags;
-retry:
-	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
-
-	if (rw->counter != 0) {
-		arch_spin_unlock(&rw->lock);
-		local_irq_restore(flags);
-
-		while (rw->counter != 0)
-			cpu_relax();
-
-		goto retry;
-	}
-
-	rw->counter = -1; /* mark as write-locked */
-	mb();
-	local_irq_restore(flags);
+	while (!arch_read_trylock(rw))
+		cpu_relax();
 }
 
-static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
 {
-	rw->counter = 0;
-	arch_spin_unlock(&rw->lock);
+	while (!arch_write_trylock(rw))
+		cpu_relax();
 }
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
-	int result = 0;
 
 	local_irq_save(flags);
-	if (arch_spin_trylock(&rw->lock)) {
-		if (rw->counter == 0) {
-			rw->counter = -1;
-			result = 1;
-		} else {
-			/* Read-locked. Oh well. */
-			arch_spin_unlock(&rw->lock);
-		}
-	}
+	arch_spin_lock(&(rw->lock_mutex));
+	rw->counter++;
+	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);
+}
 
-	return result;
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	arch_spin_lock(&(rw->lock_mutex));
+	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+	arch_spin_unlock(&(rw->lock_mutex));
+	local_irq_restore(flags);
 }
 
 #endif /* __ASM_SPINLOCK_H */
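The rework builds everything from two trylock primitives that manipulate one
counter under @lock_mutex. A compact sketch of those semantics in portable
C11 (a threads.h mutex in place of the arch spinlock, irq save/restore
omitted; names mirror the patch but this is an illustration only):

#include <stdbool.h>
#include <threads.h>

#define RW_UNLOCKED 0x01000000u		/* __ARCH_RW_LOCK_UNLOCKED__ */

struct rwlock {
	mtx_t lock_mutex;		/* serialises access to counter */
	unsigned int counter;		/* RW_UNLOCKED, minus one per reader,
					   0 while write-locked */
};

static bool read_trylock(struct rwlock *rw)
{
	bool ret = false;

	mtx_lock(&rw->lock_mutex);
	if (rw->counter > 0) {		/* 0 means a writer holds it */
		rw->counter--;		/* one more reader */
		ret = true;
	}
	mtx_unlock(&rw->lock_mutex);
	return ret;
}

static bool write_trylock(struct rwlock *rw)
{
	bool ret = false;

	mtx_lock(&rw->lock_mutex);
	if (rw->counter == RW_UNLOCKED) {	/* no readers, no writer */
		rw->counter = 0;
		ret = true;
	}
	mtx_unlock(&rw->lock_mutex);
	return ret;
}

int main(void)
{
	struct rwlock rw = { .counter = RW_UNLOCKED };

	mtx_init(&rw.lock_mutex, mtx_plain);
	read_trylock(&rw);			/* first reader: ok */
	read_trylock(&rw);			/* second reader: ok */
	return write_trylock(&rw) ? 1 : 0;	/* writer must fail now */
}

The blocking lock functions then simply spin on these trylocks, which is why
the old retry-label loops disappear from the header.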
@@ -12,11 +12,19 @@ typedef struct {
 #endif
 } arch_spinlock_t;
 
+
+/* counter:
+ * Unlocked     : 0x0100_0000
+ * Read lock(s) : 0x00FF_FFFF to 0x01 (Multiple Readers decrement it)
+ * Write lock   : 0x0, but only if prior value is "unlocked" 0x0100_0000
+ */
 typedef struct {
-	arch_spinlock_t lock;
-	volatile int counter;
+	arch_spinlock_t		lock_mutex;
+	volatile unsigned int	counter;
 } arch_rwlock_t;
 
-#define __ARCH_RW_LOCK_UNLOCKED  { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED__	0x01000000
+#define __ARCH_RW_LOCK_UNLOCKED	{ .lock_mutex = __ARCH_SPIN_LOCK_UNLOCKED, \
+					.counter = __ARCH_RW_LOCK_UNLOCKED__ }
 
 #endif
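Because the three states live in disjoint counter ranges, one comparison
classifies the lock. The arithmetic can be sanity-checked with static
asserts (a standalone illustration, not kernel code):

#include <assert.h>			/* static_assert, C11 */

#define RW_UNLOCKED 0x01000000u		/* mirrors __ARCH_RW_LOCK_UNLOCKED__ */

static_assert(RW_UNLOCKED - 1 == 0x00FFFFFFu,
	      "first reader lands at the top of the read-locked range");
static_assert(RW_UNLOCKED > 0,
	      "write-locked value 0 stays distinct from unlocked");

int main(void) { return 0; }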
@@ -25,6 +25,22 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
 	struct alt_instr *entry;
 	int index = 0, applied = 0;
 	int num_cpus = num_online_cpus();
+	u32 cond_check;
+
+	cond_check = ALT_COND_ALWAYS |
+		((num_cpus == 1) ? ALT_COND_NO_SMP : 0) |
+		((cache_info.dc_size == 0) ? ALT_COND_NO_DCACHE : 0) |
+		((cache_info.ic_size == 0) ? ALT_COND_NO_ICACHE : 0) |
+		(running_on_qemu ? ALT_COND_RUN_ON_QEMU : 0) |
+		((split_tlb == 0) ? ALT_COND_NO_SPLIT_TLB : 0) |
+		/*
+		 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit
+		 * set (bit #61, big endian), we have to flush and sync every
+		 * time IO-PDIR is changed in Ike/Astro.
+		 */
+		(((boot_cpu_data.cpu_type > pcxw_) &&
+		  ((boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC) == 0))
+			? ALT_COND_NO_IOC_FDC : 0);
 
 	for (entry = start; entry < end; entry++, index++) {
 
@@ -38,29 +54,14 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
 
 		WARN_ON(!cond);
 
-		if (cond != ALT_COND_ALWAYS && no_alternatives)
+		if ((cond & ALT_COND_ALWAYS) == 0 && no_alternatives)
 			continue;
 
 		pr_debug("Check %d: Cond 0x%x, Replace %02d instructions @ 0x%px with 0x%08x\n",
 			index, cond, len, from, replacement);
 
-		if ((cond & ALT_COND_NO_SMP) && (num_cpus != 1))
-			continue;
-		if ((cond & ALT_COND_NO_DCACHE) && (cache_info.dc_size != 0))
-			continue;
-		if ((cond & ALT_COND_NO_ICACHE) && (cache_info.ic_size != 0))
-			continue;
-		if ((cond & ALT_COND_RUN_ON_QEMU) && !running_on_qemu)
-			continue;
-
-		/*
-		 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit
-		 * set (bit #61, big endian), we have to flush and sync every
-		 * time IO-PDIR is changed in Ike/Astro.
-		 */
-		if ((cond & ALT_COND_NO_IOC_FDC) &&
-		    ((boot_cpu_data.cpu_type <= pcxw_) ||
-		     (boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC)))
+		/* Bounce out if none of the conditions are true. */
+		if ((cond & cond_check) == 0)
 			continue;
 
 		/* Want to replace pdtlb by a pdtlb,l instruction? */
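The refactor computes, once per boot, a mask of every condition that holds
on this machine; each alternative entry is then accepted or rejected with a
single AND instead of a chain of if/continue tests. A standalone sketch of
the pattern (flag names invented for illustration):

#include <stdio.h>

#define COND_ALWAYS	(1u << 0)
#define COND_UP		(1u << 1)	/* uniprocessor only */
#define COND_QEMU	(1u << 2)	/* running under an emulator */

/* gather all true conditions into one bitmask, once */
static unsigned int build_cond_check(int num_cpus, int on_qemu)
{
	return COND_ALWAYS |
	       ((num_cpus == 1) ? COND_UP : 0) |
	       (on_qemu ? COND_QEMU : 0);
}

/* one test replaces the per-condition checks */
static int should_apply(unsigned int cond, unsigned int cond_check)
{
	return (cond & cond_check) != 0;
}

int main(void)
{
	unsigned int check = build_cond_check(1, 0);	/* 1 CPU, no qemu */

	printf("UP-only patch applies:   %d\n", should_apply(COND_UP, check));
	printf("QEMU-only patch applies: %d\n", should_apply(COND_QEMU, check));
	return 0;
}

A side effect of the mask test is that an entry may now carry several
condition bits and is applied if any one of them is satisfied, which is what
the commit title means by accepting multiple conditions.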
@@ -560,33 +560,23 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 	goto out;
 }
 
-static struct irqaction timer_action = {
-	.handler = timer_interrupt,
-	.name = "timer",
-	.flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL,
-};
-
-#ifdef CONFIG_SMP
-static struct irqaction ipi_action = {
-	.handler = ipi_interrupt,
-	.name = "IPI",
-	.flags = IRQF_PERCPU,
-};
-#endif
-
 static void claim_cpu_irqs(void)
 {
+	unsigned long flags = IRQF_TIMER | IRQF_PERCPU | IRQF_IRQPOLL;
 	int i;
+
 	for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
 		irq_set_chip_and_handler(i, &cpu_interrupt_type,
 					 handle_percpu_irq);
 	}
 
 	irq_set_handler(TIMER_IRQ, handle_percpu_irq);
-	setup_irq(TIMER_IRQ, &timer_action);
+	if (request_irq(TIMER_IRQ, timer_interrupt, flags, "timer", NULL))
+		pr_err("Failed to register timer interrupt\n");
 #ifdef CONFIG_SMP
 	irq_set_handler(IPI_IRQ, handle_percpu_irq);
-	setup_irq(IPI_IRQ, &ipi_action);
+	if (request_irq(IPI_IRQ, ipi_interrupt, IRQF_PERCPU, "IPI", NULL))
+		pr_err("Failed to register IPI interrupt\n");
 #endif
 }
 
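setup_irq() existed for registrations that ran before the allocators were
up; request_irq() allocates the irqaction itself and returns an error code,
which is why each conversion grows an explicit failure check. A standalone
mock of that shape change (everything here is invented for illustration):

#include <stdio.h>
#include <stdlib.h>

typedef int (*irq_handler_t)(int irq, void *dev_id);

struct irqaction {
	irq_handler_t handler;
	const char *name;
	unsigned long flags;
	void *dev_id;
};

static struct irqaction *irq_table[16];

/* new style: the core allocates the descriptor, the caller just checks */
static int request_irq(unsigned int irq, irq_handler_t handler,
		       unsigned long flags, const char *name, void *dev_id)
{
	struct irqaction *action;

	if (irq >= 16 || irq_table[irq])
		return -1;			/* -EBUSY in the real API */
	action = malloc(sizeof(*action));
	if (!action)
		return -1;			/* -ENOMEM in the real API */
	*action = (struct irqaction){ handler, name, flags, dev_id };
	irq_table[irq] = action;
	return 0;
}

static int timer_handler(int irq, void *dev_id) { return 1; }

int main(void)
{
	if (request_irq(0, timer_handler, 0, "timer", NULL))
		fprintf(stderr, "Failed to register timer interrupt\n");
	return 0;
}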
@@ -935,7 +935,7 @@ ENTRY(lws_table)
 END(lws_table)
 	/* End of lws table */
 
-#define __SYSCALL(nr, entry, nargs) ASM_ULONG_INSN entry
+#define __SYSCALL(nr, entry)	ASM_ULONG_INSN entry
 	.align 8
 ENTRY(sys_call_table)
 	.export sys_call_table,data
@@ -13,10 +13,10 @@ emit() {
 	t_entry="$3"
 
 	while [ $t_nxt -lt $t_nr ]; do
-		printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}"
+		printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}"
 		t_nxt=$((t_nxt+1))
 	done
-	printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
+	printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}"
 }
 
 grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
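Together with the syscall.S hunk above, the generator and its consumer now
agree on a two-argument form; per the commit title, the nargs column could
be dropped because nothing read it. An illustrative round trip (the entry
name for syscall 0 is assumed here):

	__SYSCALL(0,sys_restart_syscall)	/* emitted by syscalltbl.sh */
	ASM_ULONG_INSN sys_restart_syscall	/* after the #define in
						   syscall.S: one pointer-sized
						   slot in sys_call_table */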
@@ -243,11 +243,6 @@ static irqreturn_t dummy_irq2_handler(int _, void *dev)
 	return IRQ_HANDLED;
 }
 
-static struct irqaction irq2_action = {
-	.handler = dummy_irq2_handler,
-	.name = "cascade",
-};
-
 static void init_eisa_pic(void)
 {
 	unsigned long flags;
@@ -335,7 +330,8 @@ static int __init eisa_probe(struct parisc_device *dev)
 	}
 
 	/* Reserve IRQ2 */
-	setup_irq(2, &irq2_action);
+	if (request_irq(2, dummy_irq2_handler, 0, "cascade", NULL))
+		pr_err("Failed to request irq 2 (cascade)\n");
 	for (i = 0; i < 16; i++) {
 		irq_set_chip_and_handler(i, &eisa_interrupt_type,
 					 handle_simple_irq);