From a14949e09a228dcd4cc5088c90c375429c7d102c Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Thu, 30 Jul 2015 19:19:43 +0100
Subject: [PATCH] arm64: cmpxchg: truncate sub-word signed types before
 comparison

When performing a cmpxchg operation on a signed sub-word type (e.g. s8),
we need to ensure that the upper register bits of the "old" value used
for comparison are zeroed, otherwise we may erroneously fail the cmpxchg,
which may even be interpreted as success by the caller (if the compiler
performs the truncation as part of its check). This has been observed in
mod_state, where negative values were causing problems with
this_cpu_cmpxchg.

This patch fixes the issue by explicitly casting 8-bit and 16-bit "old"
values using unsigned types in our cmpxchg wrappers. 32-bit types can be
left alone, since the underlying asm makes use of W registers in this
case.

Reported-by: Mark Rutland
Signed-off-by: Will Deacon
---
 arch/arm64/include/asm/cmpxchg.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 7bfda0944c9b..899e9f1d19e4 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -122,9 +122,9 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 {
 	switch (size) {
 	case 1:
-		return __cmpxchg_case_1(ptr, old, new);
+		return __cmpxchg_case_1(ptr, (u8)old, new);
 	case 2:
-		return __cmpxchg_case_2(ptr, old, new);
+		return __cmpxchg_case_2(ptr, (u16)old, new);
 	case 4:
 		return __cmpxchg_case_4(ptr, old, new);
 	case 8:
@@ -141,9 +141,9 @@ static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
 {
 	switch (size) {
 	case 1:
-		return __cmpxchg_case_mb_1(ptr, old, new);
+		return __cmpxchg_case_mb_1(ptr, (u8)old, new);
 	case 2:
-		return __cmpxchg_case_mb_2(ptr, old, new);
+		return __cmpxchg_case_mb_2(ptr, (u16)old, new);
 	case 4:
 		return __cmpxchg_case_mb_4(ptr, old, new);
 	case 8:
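
Note (not part of the patch): the sketch below is a minimal userspace
illustration of the sign-extension behaviour the (u8)/(u16) casts guard
against. It uses standard int8_t/uint8_t as stand-ins for the kernel's
s8/u8 types; the variable names and the printed comparison are purely
illustrative assumptions, not taken from the kernel sources.

/*
 * Hypothetical demo: a negative s8 value promoted to unsigned long is
 * sign-extended, so its upper bits no longer match the zero-extended
 * byte a sub-word load would produce. Truncating "old" to u8 restores
 * the match, which is what the patch does in the cmpxchg wrappers.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int8_t counter = -1;			/* sub-word signed value, e.g. an s8 counter  */
	unsigned long old = counter;		/* sign-extends: 0xffffffffffffffff           */
	unsigned long loaded = (uint8_t)counter;/* zero-extended byte value: 0xff             */

	printf("sign-extended old : %#lx\n", old);
	printf("zero-extended byte: %#lx\n", loaded);
	printf("raw compare       : %s\n", old == loaded ? "match" : "mismatch");
	printf("with (u8) cast    : %s\n",
	       (unsigned long)(uint8_t)old == loaded ? "match" : "mismatch");

	return 0;
}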