x86: put movsl_mask into uaccess.h

x86_64 does not need it, but it won't have X86_INTEL_USERCOPY
defined either.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 8cb834e99f
commit 8bc7de0c5d

2 changed files with 9 additions and 9 deletions
include/asm-x86/uaccess.h
@@ -432,6 +432,15 @@ struct __large_struct { unsigned long buf[100]; };
 #define __get_user_unaligned __get_user
 #define __put_user_unaligned __put_user
 
+/*
+ * movsl can be slow when source and dest are not both 8-byte aligned
+ */
+#ifdef CONFIG_X86_INTEL_USERCOPY
+extern struct movsl_mask {
+	int mask;
+} ____cacheline_aligned_in_smp movsl_mask;
+#endif
+
 #ifdef CONFIG_X86_32
 # include "uaccess_32.h"
 #else
include/asm-x86/uaccess_32.h
@@ -11,15 +11,6 @@
 #include <asm/asm.h>
 #include <asm/page.h>
 
-/*
- * movsl can be slow when source and dest are not both 8-byte aligned
- */
-#ifdef CONFIG_X86_INTEL_USERCOPY
-extern struct movsl_mask {
-	int mask;
-} ____cacheline_aligned_in_smp movsl_mask;
-#endif
-
 unsigned long __must_check __copy_to_user_ll
 		(void __user *to, const void *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll
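
For context, movsl_mask holds the alignment mask the 32-bit usercopy
routines consult before using rep movsl for bulk copies. Below is a
minimal sketch of that check, modeled on arch/x86/lib/usercopy_32.c
from this period; the helper name __movsl_is_ok and the 64-byte
threshold are taken from that file and should be treated as
illustrative rather than guaranteed to match every kernel version.

/*
 * Sketch: how the mask moved by this patch gets used. The check is
 * guarded by CONFIG_X86_INTEL_USERCOPY, so x86_64 (which never
 * defines it) compiles it away, as the commit message notes.
 */
static inline int __movsl_is_ok(unsigned long a1, unsigned long a2,
				unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
	/*
	 * Skip movsl for large copies whose source and destination
	 * are misaligned relative to each other.
	 */
	if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
		return 0;
#endif
	return 1;
}

movsl_mask.mask itself is filled in during CPU setup (e.g. 7 on Intel
CPUs that prefer 8-byte alignment), which is why the declaration must
be visible wherever the copy routines are built; hoisting it into the
shared uaccess.h achieves that without affecting x86_64.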