From 7dd6662a92fe9a15ad565045aa60367995cc533d Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Sat, 15 Aug 2009 07:43:21 +0900
Subject: [PATCH] sh: delay slot future proofing via EXPMASK on SH-4A parts.

This implements EXPMASK initialization code for SH-4A parts, where it is
possible to disable compat features that will go away in newer cores.
Presently this includes disabling support for non-nop instructions in the
rte delay slot, as well as a sleep instruction being placed in a delay
slot (neither of which the kernel does any longer).

As a result of this, any future offenders will have illegal slot
exceptions generated for them. Associative writes for the memory-mapped
cache array are still left enabled, until such a point that special cache
operations for SH-4A are provided to move off of the current (and rather
dated) SH-4 versions.

Signed-off-by: Paul Mundt
---
 arch/sh/kernel/cpu/init.c | 34 +++++++++++++++++++++++++++++++++-
 1 file changed, 33 insertions(+), 1 deletion(-)

diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index ad85421099cd..d40b9db5be03 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -3,7 +3,7 @@
  *
  * CPU init code
  *
- * Copyright (C) 2002 - 2007 Paul Mundt
+ * Copyright (C) 2002 - 2009 Paul Mundt
  * Copyright (C) 2003 Richard Curnow
  *
  * This file is subject to the terms and conditions of the GNU General Public
@@ -62,6 +62,37 @@ static void __init speculative_execution_init(void)
 #define speculative_execution_init()	do { } while (0)
 #endif
 
+#ifdef CONFIG_CPU_SH4A
+#define EXPMASK			0xff2f0004
+#define EXPMASK_RTEDS		(1 << 0)
+#define EXPMASK_BRDSSLP		(1 << 1)
+#define EXPMASK_MMCAW		(1 << 4)
+
+static void __init expmask_init(void)
+{
+	unsigned long expmask = __raw_readl(EXPMASK);
+
+	/*
+	 * Future proofing.
+	 *
+	 * Disable support for slottable sleep instruction
+	 * and non-nop instructions in the rte delay slot.
+	 */
+	expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP);
+
+	/*
+	 * Enable associative writes to the memory-mapped cache array
+	 * until the cache flush ops have been rewritten.
+	 */
+	expmask |= EXPMASK_MMCAW;
+
+	__raw_writel(expmask, EXPMASK);
+	ctrl_barrier();
+}
+#else
+#define expmask_init()	do { } while (0)
+#endif
+
 /* 2nd-level cache init */
 void __uses_jump_to_uncached __attribute__ ((weak)) l2_cache_init(void)
 {
@@ -321,4 +352,5 @@ asmlinkage void __init sh_cpu_init(void)
 #endif
 
 	speculative_execution_init();
+	expmask_init();
 }