linux-hardened/arch/mips/include/asm/cmpxchg.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef __ASM_CMPXCHG_H
#define __ASM_CMPXCHG_H
#include <linux/bug.h>
#include <linux/irqflags.h>
#include <asm/compiler.h>
#include <asm/war.h>
/*
 * Using a branch-likely instruction to check the result of an sc instruction
 * works around a bug present in R10000 CPUs prior to revision 3.0 that could
 * cause ll-sc sequences to execute non-atomically.
 */
#if R10000_LLSC_WAR
# define __scbeqz "beqzl"
#else
# define __scbeqz "beqz"
#endif
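
/*
 * For illustration only (this exact sequence is not emitted by this header):
 * with the workaround enabled, a read-modify-write loop built from the macros
 * below takes roughly this shape, with beqzl retrying whenever the
 * store-conditional fails; CPUs without the R10000 erratum use a plain beqz:
 *
 *	1:	ll	t0, 0(a0)	# load-linked old value
 *		addiu	t1, t0, 1	# compute the new value
 *		sc	t1, 0(a0)	# try to store it back
 *		beqzl	t1, 1b		# retry if sc failed
 */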
/*
 * These functions don't exist, so if they are called you'll either:
 *
 * - Get an error at compile-time due to __compiletime_error, if supported by
 *   your compiler.
 *
 * or:
 *
 * - Get an error at link-time due to the call to the missing function.
 */
extern unsigned long __cmpxchg_called_with_bad_pointer(void)
	__compiletime_error("Bad argument size for cmpxchg");
extern unsigned long __xchg_called_with_bad_pointer(void)
	__compiletime_error("Bad argument size for xchg");
#define __xchg_asm(ld, st, m, val)					\
({									\
	__typeof(*(m)) __ret;						\
									\
	if (kernel_uses_llsc) {						\
		__asm__ __volatile__(					\
		"	.set	push				\n"	\
		"	.set	noat				\n"	\
		"	.set	push				\n"	\
		"	.set	" MIPS_ISA_ARCH_LEVEL "		\n"	\
		"1:	" ld "	%0, %2		# __xchg_asm	\n"	\
		"	.set	pop				\n"	\
		"	move	$1, %z3				\n"	\
		"	.set	" MIPS_ISA_ARCH_LEVEL "		\n"	\
		"	" st "	$1, %1				\n"	\
		"\t" __scbeqz "	$1, 1b				\n"	\
		"	.set	pop				\n"	\
		: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)		\
		: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)			\
		: "memory");						\
	} else {							\
		unsigned long __flags;					\
									\
		raw_local_irq_save(__flags);				\
		__ret = *m;						\
		*m = val;						\
		raw_local_irq_restore(__flags);				\
	}								\
									\
	__ret;								\
})
extern unsigned long __xchg_small(volatile void *ptr, unsigned long val,
				  unsigned int size);

static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
				   int size)
{
	switch (size) {
	case 1:
	case 2:
		return __xchg_small(ptr, x, size);

	case 4:
		return __xchg_asm("ll", "sc", (volatile u32 *)ptr, x);

	case 8:
		if (!IS_ENABLED(CONFIG_64BIT))
			return __xchg_called_with_bad_pointer();

		return __xchg_asm("lld", "scd", (volatile u64 *)ptr, x);

	default:
		return __xchg_called_with_bad_pointer();
	}
}

#define xchg(ptr, x)							\
({									\
	__typeof__(*(ptr)) __res;					\
									\
	smp_mb__before_llsc();						\
									\
	__res = (__typeof__(*(ptr)))					\
		__xchg((ptr), (unsigned long)(x), sizeof(*(ptr)));	\
									\
	smp_llsc_mb();							\
									\
	__res;								\
})
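
/*
 * Usage sketch (illustrative only; "claimed" is a hypothetical variable):
 * xchg() returns the previous value, so exactly one caller observes 0 and
 * thereby takes ownership of the flag.
 *
 *	static u32 claimed;
 *
 *	if (xchg(&claimed, 1) == 0) {
 *		// this caller won the race
 *	}
 */
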
#define __cmpxchg_asm(ld, st, m, old, new)				\
({									\
	__typeof(*(m)) __ret;						\
									\
	if (kernel_uses_llsc) {						\
		__asm__ __volatile__(					\
		"	.set	push				\n"	\
		"	.set	noat				\n"	\
		"	.set	push				\n"	\
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"	\
		"1:	" ld "	%0, %2		# __cmpxchg_asm	\n"	\
		"	bne	%0, %z3, 2f			\n"	\
		"	.set	pop				\n"	\
		"	move	$1, %z4				\n"	\
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"	\
		"	" st "	$1, %1				\n"	\
		"\t" __scbeqz "	$1, 1b				\n"	\
		"	.set	pop				\n"	\
		"2:						\n"	\
		: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)		\
		: GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new)	\
		: "memory");						\
	} else {							\
		unsigned long __flags;					\
									\
		raw_local_irq_save(__flags);				\
		__ret = *m;						\
		if (__ret == old)					\
			*m = new;					\
		raw_local_irq_restore(__flags);				\
	}								\
									\
	__ret;								\
})
extern unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
				     unsigned long new, unsigned int size);

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, unsigned int size)
{
	switch (size) {
	case 1:
	case 2:
		return __cmpxchg_small(ptr, old, new, size);

	case 4:
return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr,
(u32)old, new);
	case 8:
		/* lld/scd are only available for MIPS64 */
		if (!IS_ENABLED(CONFIG_64BIT))
			return __cmpxchg_called_with_bad_pointer();

return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr,
(u64)old, new);
default:
return __cmpxchg_called_with_bad_pointer();
}
}
#define cmpxchg_local(ptr, old, new)					\
	((__typeof__(*(ptr)))						\
		__cmpxchg((ptr),					\
			  (unsigned long)(__typeof__(*(ptr)))(old),	\
			  (unsigned long)(__typeof__(*(ptr)))(new),	\
			  sizeof(*(ptr))))

#define cmpxchg(ptr, old, new)						\
({									\
	__typeof__(*(ptr)) __res;					\
									\
	smp_mb__before_llsc();						\
	__res = cmpxchg_local((ptr), (old), (new));			\
	smp_llsc_mb();							\
									\
	__res;								\
})
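
/*
 * Usage sketch (illustrative only; "counter" is a hypothetical variable):
 * cmpxchg() returns the value that was actually found in memory, so a
 * lock-free update retries until no other CPU changed it in between.
 *
 *	static u32 counter;
 *	u32 old, new;
 *
 *	do {
 *		old = READ_ONCE(counter);
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */
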
#ifdef CONFIG_64BIT
#define cmpxchg64_local(ptr, o, n)					\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg_local((ptr), (o), (n));					\
  })

#define cmpxchg64(ptr, o, n)						\
  ({									\
	BUILD_BUG_ON(sizeof(*(ptr)) != 8);				\
	cmpxchg((ptr), (o), (n));					\
  })
#else
#include <asm-generic/cmpxchg-local.h>
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
#endif
#endif
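
/*
 * Usage sketch (illustrative only; "seq" is a hypothetical variable):
 * cmpxchg64() behaves like cmpxchg() on a 64-bit quantity; on CONFIG_64BIT
 * kernels the BUILD_BUG_ON() above additionally rejects operands that are
 * not exactly 8 bytes wide.
 *
 *	static u64 seq;
 *	u64 old = READ_ONCE(seq);
 *
 *	if (cmpxchg64(&seq, old, old + 1) == old) {
 *		// the 64-bit update took effect
 *	}
 */
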
#undef __scbeqz
#endif /* __ASM_CMPXCHG_H */