futex: Sanitize futex ops argument types
Change futex_atomic_op_inuser and futex_atomic_cmpxchg_inatomic
prototypes to use u32 types for the futex as this is the data type the
futex core code uses all over the place.

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Darren Hart <darren@dvhart.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: David Howells <dhowells@redhat.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
LKML-Reference: <20110311025058.GD26122@google.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 37a9d912b2
commit 8d7718aa08
20 changed files with 116 additions and 110 deletions
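For readers skimming the diff, the post-change shape of the two helpers is sketched below as a self-contained user-space C mock-up. This is illustrative only and not taken from the kernel: the u32 typedef, the empty __user macro and the dummy function bodies are local stand-ins so the snippet compiles on its own, while the field decoding mirrors what the per-architecture helpers in the diff do.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins so this sketch builds in user space; they are assumptions,
 * not kernel definitions. */
typedef uint32_t u32;
#define __user /* address-space annotation, meaningless outside the kernel */

/* Post-patch shape: the futex word is passed as u32 everywhere, matching the
 * type the futex core hands down. */
static int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                                         u32 oldval, u32 newval)
{
        /* Dummy body: report the previous value and swap on match. */
        *uval = *uaddr;
        if (*uaddr == oldval)
                *uaddr = newval;
        return 0;
}

static int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
        /* Field decoding as in the kernel helpers (cast through unsigned here
         * to keep the left shifts well-defined in a stand-alone build). */
        int op = (encoded_op >> 28) & 7;
        int cmp = (encoded_op >> 24) & 15;
        int oparg = (int)((unsigned)encoded_op << 8) >> 20;
        int cmparg = (int)((unsigned)encoded_op << 20) >> 20;

        (void)uaddr;
        printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
        return 0;
}

int main(void)
{
        u32 word = 0, prev;

        futex_atomic_op_inuser((1 << 28) | (3 << 12) | 1, &word);
        futex_atomic_cmpxchg_inatomic(&prev, &word, 0, 1);
        printf("prev=%u word=%u\n", (unsigned)prev, (unsigned)word);
        return 0;
}

Compiling and running this merely prints the decoded op/cmp/oparg/cmparg fields and the cmpxchg result; the point is only to show where the int-to-u32 change lands in the prototypes.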
@@ -29,7 +29,7 @@
         : "r" (uaddr), "r"(oparg) \
         : "memory")
 
-static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
         int op = (encoded_op >> 28) & 7;
         int cmp = (encoded_op >> 24) & 15;
@@ -39,7 +39,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
         if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                 oparg = 1 << oparg;
 
-        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         pagefault_disable();
@@ -81,12 +81,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-                              int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                              u32 oldval, u32 newval)
 {
-        int ret = 0, prev, cmp;
+        int ret = 0, cmp;
+        u32 prev;
 
-        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         __asm__ __volatile__ (

@@ -35,7 +35,7 @@
         : "cc", "memory")
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
         int op = (encoded_op >> 28) & 7;
         int cmp = (encoded_op >> 24) & 15;
@@ -46,7 +46,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
         if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                 oparg = 1 << oparg;
 
-        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         pagefault_disable();    /* implies preempt_disable() */
@@ -88,12 +88,13 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-                              int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                              u32 oldval, u32 newval)
 {
-        int ret = 0, val;
+        int ret = 0;
+        u32 val;
 
-        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         /* Note that preemption is disabled by futex_atomic_cmpxchg_inatomic

@@ -7,11 +7,11 @@
 #include <asm/errno.h>
 #include <asm/uaccess.h>
 
-extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr);
+extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr);
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-                              int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                              u32 oldval, u32 newval)
 {
         return -ENOSYS;
 }

@@ -18,7 +18,7 @@
  * the various futex operations; MMU fault checking is ignored under no-MMU
  * conditions
  */
-static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_oldval)
+static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *_oldval)
 {
         int oldval, ret;
 
@@ -50,7 +50,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_oldval)
         return ret;
 }
 
-static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_oldval)
+static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, int *_oldval)
 {
         int oldval, ret;
 
@@ -83,7 +83,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_oldval)
         return ret;
 }
 
-static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_oldval)
+static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, int *_oldval)
 {
         int oldval, ret;
 
@@ -116,7 +116,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_oldval)
         return ret;
 }
 
-static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_oldval)
+static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, int *_oldval)
 {
         int oldval, ret;
 
@@ -149,7 +149,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_oldval)
         return ret;
 }
 
-static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_oldval)
+static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_oldval)
 {
         int oldval, ret;
 
@@ -186,7 +186,7 @@ static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_oldval)
 /*
  * do the futex operations
  */
-int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
         int op = (encoded_op >> 28) & 7;
         int cmp = (encoded_op >> 24) & 15;
@@ -197,7 +197,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
         if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                 oparg = 1 << oparg;
 
-        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         pagefault_disable();

@@ -46,7 +46,7 @@ do { \
 } while (0)
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
         int op = (encoded_op >> 28) & 7;
         int cmp = (encoded_op >> 24) & 15;
@@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
         if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                 oparg = 1 << oparg;
 
-        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         pagefault_disable();
@@ -100,10 +100,10 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-                              int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                              u32 oldval, u32 newval)
 {
-        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         {

@@ -29,7 +29,7 @@
 })
 
 static inline int
-futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
         int op = (encoded_op >> 28) & 7;
         int cmp = (encoded_op >> 24) & 15;
@@ -39,7 +39,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
         if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                 oparg = 1 << oparg;
 
-        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         pagefault_disable();
@@ -94,12 +94,13 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-                              int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                              u32 oldval, u32 newval)
 {
-        int ret = 0, prev, cmp;
+        int ret = 0, cmp;
+        u32 prev;
 
-        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         __asm__ __volatile__ ("1: lwx %1, %3, r0; \

@@ -75,7 +75,7 @@
 }
 
 static inline int
-futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
         int op = (encoded_op >> 28) & 7;
         int cmp = (encoded_op >> 24) & 15;
@@ -85,7 +85,7 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
         if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                 oparg = 1 << oparg;
 
-        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         pagefault_disable();
@@ -132,12 +132,13 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-                              int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                              u32 oldval, u32 newval)
 {
-        int ret = 0, val;
+        int ret = 0;
+        u32 val;
 
-        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         if (cpu_has_llsc && R10000_LLSC_WAR) {

@@ -8,7 +8,7 @@
 #include <asm/errno.h>
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
         int op = (encoded_op >> 28) & 7;
         int cmp = (encoded_op >> 24) & 15;
@@ -18,7 +18,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
         if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                 oparg = 1 << oparg;
 
-        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         pagefault_disable();
@@ -51,10 +51,10 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 
 /* Non-atomic version */
 static inline int
-futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-                              int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                              u32 oldval, u32 newval)
 {
-        int val;
+        u32 val;
 
         /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
          * our gateway page, and causes no end of trouble...
@@ -62,7 +62,7 @@ futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
         if (segment_eq(KERNEL_DS, get_fs()) && !uaddr)
                 return -EFAULT;
 
-        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         if (get_user(val, uaddr))

@@ -30,7 +30,7 @@
         : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
         : "cr0", "memory")
 
-static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
         int op = (encoded_op >> 28) & 7;
         int cmp = (encoded_op >> 24) & 15;
@@ -40,7 +40,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
         if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                 oparg = 1 << oparg;
 
-        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         pagefault_disable();
@@ -82,12 +82,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-                              int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                              u32 oldval, u32 newval)
 {
-        int ret = 0, prev;
+        int ret = 0;
+        u32 prev;
 
-        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         __asm__ __volatile__ (

@@ -7,7 +7,7 @@
 #include <linux/uaccess.h>
 #include <asm/errno.h>
 
-static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
         int op = (encoded_op >> 28) & 7;
         int cmp = (encoded_op >> 24) & 15;
@@ -18,7 +18,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
         if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                 oparg = 1 << oparg;
 
-        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         pagefault_disable();
@@ -39,10 +39,10 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
         return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-                                                int oldval, int newval)
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                                                u32 oldval, u32 newval)
 {
-        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);

@@ -83,8 +83,8 @@ struct uaccess_ops {
         size_t (*clear_user)(size_t, void __user *);
         size_t (*strnlen_user)(size_t, const char __user *);
         size_t (*strncpy_from_user)(size_t, const char __user *, char *);
-        int (*futex_atomic_op)(int op, int __user *, int oparg, int *old);
-        int (*futex_atomic_cmpxchg)(int *, int __user *, int old, int new);
+        int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old);
+        int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new);
 };
 
 extern struct uaccess_ops uaccess;

@@ -12,12 +12,12 @@ extern size_t copy_from_user_std(size_t, const void __user *, void *);
 extern size_t copy_to_user_std(size_t, void __user *, const void *);
 extern size_t strnlen_user_std(size_t, const char __user *);
 extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
-extern int futex_atomic_cmpxchg_std(int *, int __user *, int, int);
-extern int futex_atomic_op_std(int, int __user *, int, int *);
+extern int futex_atomic_cmpxchg_std(u32 *, u32 __user *, u32, u32);
+extern int futex_atomic_op_std(int, u32 __user *, int, int *);
 
 extern size_t copy_from_user_pt(size_t, const void __user *, void *);
 extern size_t copy_to_user_pt(size_t, void __user *, const void *);
-extern int futex_atomic_op_pt(int, int __user *, int, int *);
-extern int futex_atomic_cmpxchg_pt(int *, int __user *, int, int);
+extern int futex_atomic_op_pt(int, u32 __user *, int, int *);
+extern int futex_atomic_cmpxchg_pt(u32 *, u32 __user *, u32, u32);
 
 #endif /* __ARCH_S390_LIB_UACCESS_H */

@@ -302,7 +302,7 @@ fault:
                 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
                   "m" (*uaddr) : "cc" );
 
-static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
+static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
 {
         int oldval = 0, newval, ret;
 
@@ -335,7 +335,7 @@ static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
         return ret;
 }
 
-int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
+int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
 {
         int ret;
 
@@ -354,8 +354,8 @@ int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old)
         return ret;
 }
 
-static int __futex_atomic_cmpxchg_pt(int *uval, int __user *uaddr,
-                                     int oldval, int newval)
+static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
+                                     u32 oldval, u32 newval)
 {
         int ret;
 
@@ -370,8 +370,8 @@ static int __futex_atomic_cmpxchg_pt(int *uval, int __user *uaddr,
         return ret;
 }
 
-int futex_atomic_cmpxchg_pt(int *uval, int __user *uaddr,
-                            int oldval, int newval)
+int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
+                            u32 oldval, u32 newval)
 {
         int ret;
 

@@ -255,7 +255,7 @@ size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
                 : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
                   "m" (*uaddr) : "cc");
 
-int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old)
+int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old)
 {
         int oldval = 0, newval, ret;
 
@@ -287,8 +287,8 @@ int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old)
         return ret;
 }
 
-int futex_atomic_cmpxchg_std(int *uval, int __user *uaddr,
-                             int oldval, int newval)
+int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr,
+                             u32 oldval, u32 newval)
 {
         int ret;
 

@@ -3,7 +3,7 @@
 
 #include <asm/system.h>
 
-static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr,
+static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr,
                                            int *oldval)
 {
         unsigned long flags;
@@ -20,7 +20,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr,
         return ret;
 }
 
-static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr,
+static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr,
                                            int *oldval)
 {
         unsigned long flags;
@@ -37,7 +37,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr,
         return ret;
 }
 
-static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr,
+static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr,
                                           int *oldval)
 {
         unsigned long flags;
@@ -54,7 +54,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr,
         return ret;
 }
 
-static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr,
+static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr,
                                            int *oldval)
 {
         unsigned long flags;
@@ -71,7 +71,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr,
         return ret;
 }
 
-static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr,
+static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr,
                                            int *oldval)
 {
         unsigned long flags;
@@ -88,12 +88,13 @@ static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr,
         return ret;
 }
 
-static inline int atomic_futex_op_cmpxchg_inatomic(int *uval,
-                                                    int __user *uaddr,
-                                                    int oldval, int newval)
+static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval,
+                                                    u32 __user *uaddr,
+                                                    u32 oldval, u32 newval)
 {
         unsigned long flags;
-        int ret, prev = 0;
+        int ret;
+        u32 prev = 0;
 
         local_irq_save(flags);
 

@@ -10,7 +10,7 @@
 /* XXX: UP variants, fix for SH-4A and SMP.. */
 #include <asm/futex-irq.h>
 
-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
         int op = (encoded_op >> 28) & 7;
         int cmp = (encoded_op >> 24) & 15;
@@ -21,7 +21,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
         if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                 oparg = 1 << oparg;
 
-        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         pagefault_disable();
@@ -65,10 +65,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-                              int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                              u32 oldval, u32 newval)
 {
-        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval);

@@ -30,7 +30,7 @@
         : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
         : "memory")
 
-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
         int op = (encoded_op >> 28) & 7;
         int cmp = (encoded_op >> 24) & 15;
@@ -38,7 +38,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
         int cmparg = (encoded_op << 20) >> 20;
         int oldval = 0, ret, tem;
 
-        if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(int))))
+        if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
                 return -EFAULT;
         if (unlikely((((unsigned long) uaddr) & 0x3UL)))
                 return -EINVAL;
@@ -85,8 +85,8 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-                              int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                              u32 oldval, u32 newval)
 {
         int ret = 0;
 

@@ -29,16 +29,16 @@
 #include <linux/uaccess.h>
 #include <linux/errno.h>
 
-extern struct __get_user futex_set(int __user *v, int i);
-extern struct __get_user futex_add(int __user *v, int n);
-extern struct __get_user futex_or(int __user *v, int n);
-extern struct __get_user futex_andn(int __user *v, int n);
-extern struct __get_user futex_cmpxchg(int __user *v, int o, int n);
+extern struct __get_user futex_set(u32 __user *v, int i);
+extern struct __get_user futex_add(u32 __user *v, int n);
+extern struct __get_user futex_or(u32 __user *v, int n);
+extern struct __get_user futex_andn(u32 __user *v, int n);
+extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n);
 
 #ifndef __tilegx__
-extern struct __get_user futex_xor(int __user *v, int n);
+extern struct __get_user futex_xor(u32 __user *v, int n);
 #else
-static inline struct __get_user futex_xor(int __user *uaddr, int n)
+static inline struct __get_user futex_xor(u32 __user *uaddr, int n)
 {
         struct __get_user asm_ret = __get_user_4(uaddr);
         if (!asm_ret.err) {
@@ -53,7 +53,7 @@ static inline struct __get_user futex_xor(int __user *uaddr, int n)
 }
 #endif
 
-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
         int op = (encoded_op >> 28) & 7;
         int cmp = (encoded_op >> 24) & 15;
@@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
         if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                 oparg = 1 << oparg;
 
-        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         pagefault_disable();
@@ -119,12 +119,12 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
         return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-                                                int oldval, int newval)
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                                                u32 oldval, u32 newval)
 {
         struct __get_user asm_ret;
 
-        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         asm_ret = futex_cmpxchg(uaddr, oldval, newval);

@@ -37,7 +37,7 @@
                 "+m" (*uaddr), "=&r" (tem) \
               : "r" (oparg), "i" (-EFAULT), "1" (0))
 
-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 {
         int op = (encoded_op >> 28) & 7;
         int cmp = (encoded_op >> 24) & 15;
@@ -48,7 +48,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
         if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                 oparg = 1 << oparg;
 
-        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
 #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP)
@@ -109,8 +109,8 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
         return ret;
 }
 
-static inline int futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-                                                int oldval, int newval)
+static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                                                u32 oldval, u32 newval)
 {
         int ret = 0;
 
@@ -120,7 +120,7 @@ static inline int futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
         return -ENOSYS;
 #endif
 
-        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"

@@ -6,7 +6,7 @@
 #include <asm/errno.h>
 
 static inline int
-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
         int op = (encoded_op >> 28) & 7;
         int cmp = (encoded_op >> 24) & 15;
@@ -16,7 +16,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
         if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                 oparg = 1 << oparg;
 
-        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         pagefault_disable();
@@ -48,8 +48,8 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int *uval, int __user *uaddr,
-                              int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                              u32 oldval, u32 newval)
 {
         return -ENOSYS;
 }