/*
 * Copyright (C) 2017 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_DAIFFLAGS_H
#define __ASM_DAIFFLAGS_H

#include <linux/irqflags.h>

#include <asm/cpufeature.h>

/* Canned PSTATE.DAIF values used with local_daif_restore(). */
#define DAIF_PROCCTX		0
#define DAIF_PROCCTX_NOIRQ	PSR_I_BIT
#define DAIF_ERRCTX		(PSR_I_BIT | PSR_A_BIT)

/* mask/save/unmask/restore all exceptions, including interrupts. */
|
|
|
|
static inline void local_daif_mask(void)
|
|
|
|
{
|
|
|
|
asm volatile(
|
|
|
|
"msr daifset, #0xf // local_daif_mask\n"
|
|
|
|
:
|
|
|
|
:
|
|
|
|
: "memory");
|
|
|
|
trace_hardirqs_off();
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline unsigned long local_daif_save(void)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
|
2019-01-31 21:58:51 +07:00
|
|
|
flags = read_sysreg(daif);
|
|
|
|
|
|
|
|
if (system_uses_irq_prio_masking()) {
|
|
|
|
/* If IRQs are masked with PMR, reflect it in the flags */
|
|
|
|
if (read_sysreg_s(SYS_ICC_PMR_EL1) <= GIC_PRIO_IRQOFF)
|
|
|
|
flags |= PSR_I_BIT;
|
|
|
|
}
|
2018-08-28 22:51:14 +07:00
|
|
|
|
2017-11-02 19:12:34 +07:00
|
|
|
local_daif_mask();
|
|
|
|
|
|
|
|
return flags;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void local_daif_restore(unsigned long flags)
|
|
|
|
{
|
2019-01-31 21:58:51 +07:00
|
|
|
bool irq_disabled = flags & PSR_I_BIT;
|
|
|
|
|
|
|
|
if (!irq_disabled) {
|
2017-11-02 19:12:34 +07:00
|
|
|
trace_hardirqs_on();
|
2018-08-28 22:51:14 +07:00
|
|
|
|
2019-01-31 21:58:51 +07:00
|
|
|
if (system_uses_irq_prio_masking())
|
|
|
|
arch_local_irq_enable();
|
|
|
|
} else if (!(flags & PSR_A_BIT)) {
|
|
|
|
/*
|
|
|
|
* If interrupts are disabled but we can take
|
|
|
|
* asynchronous errors, we can take NMIs
|
|
|
|
*/
|
|
|
|
if (system_uses_irq_prio_masking()) {
|
|
|
|
flags &= ~PSR_I_BIT;
|
|
|
|
/*
|
|
|
|
* There has been concern that the write to daif
|
|
|
|
* might be reordered before this write to PMR.
|
|
|
|
* From the ARM ARM DDI 0487D.a, section D1.7.1
|
|
|
|
* "Accessing PSTATE fields":
|
|
|
|
* Writes to the PSTATE fields have side-effects on
|
|
|
|
* various aspects of the PE operation. All of these
|
|
|
|
* side-effects are guaranteed:
|
|
|
|
* - Not to be visible to earlier instructions in
|
|
|
|
* the execution stream.
|
|
|
|
* - To be visible to later instructions in the
|
|
|
|
* execution stream
|
|
|
|
*
|
|
|
|
* Also, writes to PMR are self-synchronizing, so no
|
|
|
|
* interrupts with a lower priority than PMR is signaled
|
|
|
|
* to the PE after the write.
|
|
|
|
*
|
|
|
|
* So we don't need additional synchronization here.
|
|
|
|
*/
|
|
|
|
arch_local_irq_disable();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
write_sysreg(flags, daif);
|
2018-08-28 22:51:14 +07:00
|
|
|
|
2019-01-31 21:58:51 +07:00
|
|
|
if (irq_disabled)
|
2017-11-02 19:12:34 +07:00
|
|
|
trace_hardirqs_off();
|
|
|
|
}

#endif