Merge branch 'master' into driver-core-next-test-merge-rc2

da9846ae15 ("kernfs: make kernfs_deactivate() honor KERNFS_LOCKDEP
flag") in driver-core-linus conflicts with kernfs_drain() updates in
driver-core-next.  The former just adds the missing KERNFS_LOCKDEP
checks which are already handled by kernfs_lockdep() checks in
driver-core-next.  The conflict can be resolved by taking code from
driver-core-next.

Conflicts:
	fs/kernfs/dir.c
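For reference: the driver-core-next side folds the flag test into a single helper, which is why the per-callsite KERNFS_LOCKDEP checks from driver-core-linus become redundant once its code is taken. A rough sketch of that helper as it exists in kernfs around this time (shown for orientation only; it is not part of the diffs below):

	static bool kernfs_lockdep(struct kernfs_node *kn)
	{
	#ifdef CONFIG_DEBUG_LOCK_ALLOC
		return kn->flags & KERNFS_LOCKDEP;
	#else
		return false;
	#endif
	}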
Tejun Heo 2014-02-10 19:34:30 -05:00
commit a8fa94e0f2
203 changed files with 4366 additions and 984 deletions

@@ -0,0 +1,18 @@
+TI-NSPIRE interrupt controller
+
+Required properties:
+- compatible: Compatible property value should be "lsi,zevio-intc".
+- reg: Physical base address of the controller and length of memory mapped
+  region.
+- interrupt-controller : Identifies the node as an interrupt controller
+
+Example:
+
+interrupt-controller {
+	compatible = "lsi,zevio-intc";
+	interrupt-controller;
+	reg = <0xDC000000 0x1000>;
+	#interrupt-cells = <1>;
+};

@@ -78,7 +78,7 @@ Peter Beutner <p.beutner@gmx.net>
 Wilson Michaels <wilsonmichaels@earthlink.net>
 for the lgdt330x frontend driver, and various bugfixes
 
-Michael Krufky <mkrufky@m1k.net>
+Michael Krufky <mkrufky@linuxtv.org>
 for maintaining v4l/dvb inter-tree dependencies
 
 Taylor Jacob <rtjacob@earthlink.net>

@@ -1726,16 +1726,16 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			option description.
 
 	memmap=nn[KMG]@ss[KMG]
-			[KNL] Force usage of a specific region of memory
-			Region of memory to be used, from ss to ss+nn.
+			[KNL] Force usage of a specific region of memory.
+			Region of memory to be used is from ss to ss+nn.
 
 	memmap=nn[KMG]#ss[KMG]
 			[KNL,ACPI] Mark specific memory as ACPI data.
-			Region of memory to be used, from ss to ss+nn.
+			Region of memory to be marked is from ss to ss+nn.
 
 	memmap=nn[KMG]$ss[KMG]
 			[KNL,ACPI] Mark specific memory as reserved.
-			Region of memory to be used, from ss to ss+nn.
+			Region of memory to be reserved is from ss to ss+nn.
 			Example: Exclude memory from 0x18690000-0x1869ffff
 			         memmap=64K$0x18690000
 			or

@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 14
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*

@@ -36,6 +36,7 @@ config ARM64
	select HAVE_GENERIC_DMA_COHERENT
	select HAVE_HW_BREAKPOINT if PERF_EVENTS
	select HAVE_MEMBLOCK
+	select HAVE_PATA_PLATFORM
	select HAVE_PERF_EVENTS
	select IRQ_DOMAIN
	select MODULES_USE_ELF_RELA

@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
 # CONFIG_LOCALVERSION_AUTO is not set
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
@@ -19,6 +18,7 @@ CONFIG_BLK_DEV_INITRD=y
 CONFIG_KALLSYMS_ALL=y
 # CONFIG_COMPAT_BRK is not set
 CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
 # CONFIG_BLK_DEV_BSG is not set
@@ -27,6 +27,7 @@ CONFIG_ARCH_VEXPRESS=y
 CONFIG_ARCH_XGENE=y
 CONFIG_SMP=y
 CONFIG_PREEMPT=y
+CONFIG_CMA=y
 CONFIG_CMDLINE="console=ttyAMA0"
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_COMPAT=y
@@ -42,14 +43,17 @@ CONFIG_IP_PNP_BOOTP=y
 # CONFIG_WIRELESS is not set
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
-CONFIG_BLK_DEV=y
+CONFIG_DMA_CMA=y
 CONFIG_SCSI=y
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=y
 # CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
+CONFIG_MII=y
 CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
 # CONFIG_WLAN is not set
 CONFIG_INPUT_EVDEV=y
 # CONFIG_SERIO_I8042 is not set
@@ -62,13 +66,19 @@ CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
 CONFIG_FB=y
 # CONFIG_VGA_CONSOLE is not set
 CONFIG_FRAMEBUFFER_CONSOLE=y
 CONFIG_LOGO=y
 # CONFIG_LOGO_LINUX_MONO is not set
 # CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_USB_SUPPORT is not set
+CONFIG_USB=y
+CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_ARMMMCI=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y

@@ -54,8 +54,7 @@ static inline void atomic_add(int i, atomic_t *v)
 "	stxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }
 
 static inline int atomic_add_return(int i, atomic_t *v)
@@ -64,14 +63,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
	int result;
 
	asm volatile("// atomic_add_return\n"
-"1:	ldaxr	%w0, %2\n"
+"1:	ldxr	%w0, %2\n"
 "	add	%w0, %w0, %w3\n"
 "	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
	return result;
 }
 
@@ -86,8 +86,7 @@ static inline void atomic_sub(int i, atomic_t *v)
 "	stxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }
 
 static inline int atomic_sub_return(int i, atomic_t *v)
@@ -96,14 +95,15 @@ static inline int atomic_sub_return(int i, atomic_t *v)
	int result;
 
	asm volatile("// atomic_sub_return\n"
-"1:	ldaxr	%w0, %2\n"
+"1:	ldxr	%w0, %2\n"
 "	sub	%w0, %w0, %w3\n"
 "	stlxr	%w1, %w0, %2\n"
 "	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
	return result;
 }
 
@@ -112,17 +112,20 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
	unsigned long tmp;
	int oldval;
 
+	smp_mb();
+
	asm volatile("// atomic_cmpxchg\n"
-"1:	ldaxr	%w1, %2\n"
+"1:	ldxr	%w1, %2\n"
 "	cmp	%w1, %w3\n"
 "	b.ne	2f\n"
-"	stlxr	%w0, %w4, %2\n"
+"	stxr	%w0, %w4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
	: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
-	: "cc", "memory");
+	: "cc");
 
+	smp_mb();
	return oldval;
 }
 
@@ -173,8 +176,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 "	stxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }
 
 static inline long atomic64_add_return(long i, atomic64_t *v)
@@ -183,14 +185,15 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
	unsigned long tmp;
 
	asm volatile("// atomic64_add_return\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	add	%0, %0, %3\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
	return result;
 }
 
@@ -205,8 +208,7 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 "	stxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
-	: "Ir" (i)
-	: "cc");
+	: "Ir" (i));
 }
 
 static inline long atomic64_sub_return(long i, atomic64_t *v)
@@ -215,14 +217,15 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
	unsigned long tmp;
 
	asm volatile("// atomic64_sub_return\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	sub	%0, %0, %3\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	: "Ir" (i)
-	: "cc", "memory");
+	: "memory");
 
+	smp_mb();
	return result;
 }
 
@@ -231,17 +234,20 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
	long oldval;
	unsigned long res;
 
+	smp_mb();
+
	asm volatile("// atomic64_cmpxchg\n"
-"1:	ldaxr	%1, %2\n"
+"1:	ldxr	%1, %2\n"
 "	cmp	%1, %3\n"
 "	b.ne	2f\n"
-"	stlxr	%w0, %4, %2\n"
+"	stxr	%w0, %4, %2\n"
 "	cbnz	%w0, 1b\n"
 "2:"
	: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
	: "Ir" (old), "r" (new)
-	: "cc", "memory");
+	: "cc");
 
+	smp_mb();
	return oldval;
 }
 
@@ -253,11 +259,12 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
	unsigned long tmp;
 
	asm volatile("// atomic64_dec_if_positive\n"
-"1:	ldaxr	%0, %2\n"
+"1:	ldxr	%0, %2\n"
 "	subs	%0, %0, #1\n"
 "	b.mi	2f\n"
 "	stlxr	%w1, %0, %2\n"
 "	cbnz	%w1, 1b\n"
+"	dmb	ish\n"
 "2:"
	: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
	:

@@ -25,7 +25,7 @@
 #define wfi()		asm volatile("wfi" : : : "memory")
 
 #define isb()		asm volatile("isb" : : : "memory")
-#define dsb()		asm volatile("dsb sy" : : : "memory")
+#define dsb(opt)	asm volatile("dsb sy" : : : "memory")
 
 #define mb()		dsb()
 #define rmb()		asm volatile("dsb ld" : : : "memory")

@@ -116,6 +116,7 @@ extern void flush_dcache_page(struct page *);
 static inline void __flush_icache_all(void)
 {
	asm("ic	ialluis");
+	dsb();
 }
 
 #define flush_dcache_mmap_lock(mapping) \

@@ -29,44 +29,45 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
	switch (size) {
	case 1:
		asm volatile("// __xchg1\n"
-		"1:	ldaxrb	%w0, %2\n"
+		"1:	ldxrb	%w0, %2\n"
		"	stlxrb	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
			: "r" (x)
-			: "cc", "memory");
+			: "memory");
		break;
	case 2:
		asm volatile("// __xchg2\n"
-		"1:	ldaxrh	%w0, %2\n"
+		"1:	ldxrh	%w0, %2\n"
		"	stlxrh	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
			: "r" (x)
-			: "cc", "memory");
+			: "memory");
		break;
	case 4:
		asm volatile("// __xchg4\n"
-		"1:	ldaxr	%w0, %2\n"
+		"1:	ldxr	%w0, %2\n"
		"	stlxr	%w1, %w3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
			: "r" (x)
-			: "cc", "memory");
+			: "memory");
		break;
	case 8:
		asm volatile("// __xchg8\n"
-		"1:	ldaxr	%0, %2\n"
+		"1:	ldxr	%0, %2\n"
		"	stlxr	%w1, %3, %2\n"
		"	cbnz	%w1, 1b\n"
			: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
			: "r" (x)
-			: "cc", "memory");
+			: "memory");
		break;
	default:
		BUILD_BUG();
	}
 
+	smp_mb();
	return ret;
 }

@@ -42,7 +42,7 @@
 #define ESR_EL1_EC_SP_ALIGN	(0x26)
 #define ESR_EL1_EC_FP_EXC32	(0x28)
 #define ESR_EL1_EC_FP_EXC64	(0x2C)
-#define ESR_EL1_EC_SERRROR	(0x2F)
+#define ESR_EL1_EC_SERROR	(0x2F)
 #define ESR_EL1_EC_BREAKPT_EL0	(0x30)
 #define ESR_EL1_EC_BREAKPT_EL1	(0x31)
 #define ESR_EL1_EC_SOFTSTP_EL0	(0x32)

@@ -24,10 +24,11 @@
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg)	\
	asm volatile(							\
-"1:	ldaxr	%w1, %2\n"						\
+"1:	ldxr	%w1, %2\n"						\
	insn "\n"							\
 "2:	stlxr	%w3, %w0, %2\n"						\
 "	cbnz	%w3, 1b\n"						\
+"	dmb	ish\n"							\
 "3:\n"									\
 "	.pushsection .fixup,\"ax\"\n"					\
 "	.align	2\n"							\
@@ -40,7 +41,7 @@
 "	.popsection\n"							\
	: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp)	\
	: "r" (oparg), "Ir" (-EFAULT)					\
-	: "cc", "memory")
+	: "memory")
 
 static inline int
 futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -111,11 +112,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
		return -EFAULT;
 
	asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-"1:	ldaxr	%w1, %2\n"
+"1:	ldxr	%w1, %2\n"
 "	sub	%w3, %w1, %w4\n"
 "	cbnz	%w3, 3f\n"
 "2:	stlxr	%w3, %w5, %2\n"
 "	cbnz	%w3, 1b\n"
+"	dmb	ish\n"
 "3:\n"
 "	.pushsection .fixup,\"ax\"\n"
 "4:	mov	%w0, %w6\n"
@@ -127,7 +129,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 "	.popsection\n"
	: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
	: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
-	: "cc", "memory");
+	: "memory");
 
	*uval = val;
	return ret;

@@ -231,7 +231,7 @@
 #define ESR_EL2_EC_SP_ALIGN	(0x26)
 #define ESR_EL2_EC_FP_EXC32	(0x28)
 #define ESR_EL2_EC_FP_EXC64	(0x2C)
-#define ESR_EL2_EC_SERRROR	(0x2F)
+#define ESR_EL2_EC_SERROR	(0x2F)
 #define ESR_EL2_EC_BREAKPT	(0x30)
 #define ESR_EL2_EC_BREAKPT_HYP	(0x31)
 #define ESR_EL2_EC_SOFTSTP	(0x32)

@@ -132,7 +132,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
	"	cbnz	%w0, 2b\n"
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
-	: "cc", "memory");
+	: "memory");
 }
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -146,7 +146,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
	"1:\n"
	: "=&r" (tmp), "+Q" (rw->lock)
	: "r" (0x80000000)
-	: "cc", "memory");
+	: "memory");
 
	return !tmp;
 }
@@ -187,7 +187,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
	"	cbnz	%w1, 2b\n"
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
-	: "cc", "memory");
+	: "memory");
 }
 
 static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -201,7 +201,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
	"	cbnz	%w1, 1b\n"
	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
	:
-	: "cc", "memory");
+	: "memory");
 }
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
@@ -216,7 +216,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
	"1:\n"
	: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
	:
-	: "cc", "memory");
+	: "memory");
 
	return !tmp2;
 }

@@ -399,7 +399,10 @@ __SYSCALL(374, compat_sys_sendmmsg)
 __SYSCALL(375, sys_setns)
 __SYSCALL(376, compat_sys_process_vm_readv)
 __SYSCALL(377, compat_sys_process_vm_writev)
-__SYSCALL(378, sys_ni_syscall)		/* 378 for kcmp */
+__SYSCALL(378, sys_kcmp)
+__SYSCALL(379, sys_finit_module)
+__SYSCALL(380, sys_sched_setattr)
+__SYSCALL(381, sys_sched_getattr)
 
-#define __NR_compat_syscalls		379
+#define __NR_compat_syscalls		382
 

@@ -38,12 +38,13 @@ __kuser_cmpxchg64:			// 0xffff0f60
	.inst	0xe92d00f0		//	push	{r4, r5, r6, r7}
	.inst	0xe1c040d0		//	ldrd	r4, r5, [r0]
	.inst	0xe1c160d0		//	ldrd	r6, r7, [r1]
-	.inst	0xe1b20e9f		// 1:	ldaexd	r0, r1, [r2]
+	.inst	0xe1b20f9f		// 1:	ldrexd	r0, r1, [r2]
	.inst	0xe0303004		//	eors	r3, r0, r4
	.inst	0x00313005		//	eoreqs	r3, r1, r5
	.inst	0x01a23e96		//	stlexdeq r3, r6, [r2]
	.inst	0x03330001		//	teqeq	r3, #1
	.inst	0x0afffff9		//	beq	1b
+	.inst	0xf57ff05b		//	dmb	ish
	.inst	0xe2730000		//	rsbs	r0, r3, #0
	.inst	0xe8bd00f0		//	pop	{r4, r5, r6, r7}
	.inst	0xe12fff1e		//	bx	lr
@@ -55,11 +56,12 @@ __kuser_memory_barrier:			// 0xffff0fa0
 
	.align	5
 __kuser_cmpxchg:			// 0xffff0fc0
-	.inst	0xe1923e9f		// 1:	ldaex	r3, [r2]
+	.inst	0xe1923f9f		// 1:	ldrex	r3, [r2]
	.inst	0xe0533000		//	subs	r3, r3, r0
	.inst	0x01823e91		//	stlexeq	r3, r1, [r2]
	.inst	0x03330001		//	teqeq	r3, #1
	.inst	0x0afffffa		//	beq	1b
+	.inst	0xf57ff05b		//	dmb	ish
	.inst	0xe2730000		//	rsbs	r0, r3, #0
	.inst	0xe12fff1e		//	bx	lr

@@ -238,6 +238,8 @@ void update_vsyscall(struct timekeeper *tk)
	vdso_data->use_syscall = use_syscall;
	vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
	vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+	vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
+	vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
 
	if (!use_syscall) {
		vdso_data->cs_cycle_last = tk->clock->cycle_last;
@@ -245,8 +247,6 @@ void update_vsyscall(struct timekeeper *tk)
		vdso_data->xtime_clock_nsec = tk->xtime_nsec;
		vdso_data->cs_mult = tk->mult;
		vdso_data->cs_shift = tk->shift;
-		vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
-		vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
	}
 
	smp_wmb();

@@ -48,7 +48,7 @@ $(obj-vdso): %.o: %.S
 
 # Actual build commands
 quiet_cmd_vdsold = VDSOL $@
-      cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@
+      cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
 quiet_cmd_vdsoas = VDSOA $@
       cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<

@@ -103,6 +103,8 @@ ENTRY(__kernel_clock_gettime)
	bl	__do_get_tspec
	seqcnt_check w9, 1b
 
+	mov	x30, x2
+
	cmp	w0, #CLOCK_MONOTONIC
	b.ne	6f
@@ -118,6 +120,9 @@ ENTRY(__kernel_clock_gettime)
	ccmp	w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
	b.ne	8f
 
+	/* xtime_coarse_nsec is already right-shifted */
+	mov	x12, #0
+
	/* Get coarse timespec. */
	adr	vdso_data, _vdso_data
 3:	seqcnt_acquire
@@ -156,7 +161,7 @@ ENTRY(__kernel_clock_gettime)
	lsr	x11, x11, x12
	stp	x10, x11, [x1, #TSPEC_TV_SEC]
	mov	x0, xzr
-	ret	x2
+	ret
 7:
	mov	x30, x2
 8:	/* Syscall fallback. */

@@ -46,11 +46,12 @@ ENTRY(	\name	)
	mov	x2, #1
	add	x1, x1, x0, lsr #3	// Get word offset
	lsl	x4, x2, x3		// Create mask
-1:	ldaxr	x2, [x1]
+1:	ldxr	x2, [x1]
	lsr	x0, x2, x3		// Save old value of bit
	\instr	x2, x2, x4		// toggle bit
	stlxr	w5, x2, [x1]
	cbnz	w5, 1b
+	dmb	ish
	and	x0, x0, #1
 3:	ret
 ENDPROC(\name	)

@@ -45,6 +45,7 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
	if (IS_ENABLED(CONFIG_DMA_CMA)) {
		struct page *page;
 
+		size = PAGE_ALIGN(size);
		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
							get_order(size));
		if (!page)

@@ -203,10 +203,18 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
-		if (((addr | next | phys) & ~SECTION_MASK) == 0)
+		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+			pmd_t old_pmd =*pmd;
			set_pmd(pmd, __pmd(phys | prot_sect_kernel));
-		else
+			/*
+			 * Check for previous table entries created during
+			 * boot (__create_page_tables) and flush them.
+			 */
+			if (!pmd_none(old_pmd))
+				flush_tlb_all();
+		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
+		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
 }

@@ -32,17 +32,10 @@
 
 pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	pgd_t *new_pgd;
-
	if (PGD_SIZE == PAGE_SIZE)
-		new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+		return (pgd_t *)get_zeroed_page(GFP_KERNEL);
	else
-		new_pgd = kzalloc(PGD_SIZE, GFP_KERNEL);
-
-	if (!new_pgd)
-		return NULL;
-
-	return new_pgd;
+		return kzalloc(PGD_SIZE, GFP_KERNEL);
 }
 
 void pgd_free(struct mm_struct *mm, pgd_t *pgd)

@@ -11,7 +11,7 @@
 
-#define NR_syscalls			312 /* length of syscall table */
+#define NR_syscalls			314 /* length of syscall table */
 
 /*
  * The following defines stop scripts/checksyscalls.sh from complaining about

@@ -325,5 +325,7 @@
 #define __NR_process_vm_writev		1333
 #define __NR_accept4			1334
 #define __NR_finit_module		1335
+#define __NR_sched_setattr		1336
+#define __NR_sched_getattr		1337
 
 #endif /* _UAPI_ASM_IA64_UNISTD_H */

@@ -1773,6 +1773,8 @@ sys_call_table:
	data8 sys_process_vm_writev
	data8 sys_accept4
	data8 sys_finit_module			// 1335
+	data8 sys_sched_setattr
+	data8 sys_sched_getattr
 
	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
 #endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */

@@ -534,13 +534,10 @@ static int __init db1000_dev_init(void)
		s0 = AU1100_GPIO1_INT;
		s1 = AU1100_GPIO4_INT;
 
+		gpio_request(19, "sd0_cd");
+		gpio_request(20, "sd1_cd");
		gpio_direction_input(19);	/* sd0 cd# */
		gpio_direction_input(20);	/* sd1 cd# */
-		gpio_direction_input(21);	/* touch pendown# */
-		gpio_direction_input(207);	/* SPI MISO */
-		gpio_direction_output(208, 0);	/* SPI MOSI */
-		gpio_direction_output(209, 1);	/* SPI SCK */
-		gpio_direction_output(210, 1);	/* SPI CS# */
 
		/* spi_gpio on SSI0 pins */
		pfc = __raw_readl((void __iomem *)SYS_PINFUNC);

@@ -74,6 +74,8 @@ static inline int __enable_fpu(enum fpu_mode mode)
	default:
		BUG();
	}
+
+	return SIGFPE;
 }
 
 #define __disable_fpu()							\

@@ -369,16 +369,18 @@
 #define __NR_process_vm_writev		(__NR_Linux + 346)
 #define __NR_kcmp			(__NR_Linux + 347)
 #define __NR_finit_module		(__NR_Linux + 348)
+#define __NR_sched_setattr		(__NR_Linux + 349)
+#define __NR_sched_getattr		(__NR_Linux + 350)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
  */
-#define __NR_Linux_syscalls		348
+#define __NR_Linux_syscalls		350
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux			4000
-#define __NR_O32_Linux_syscalls		348
+#define __NR_O32_Linux_syscalls		350
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -695,16 +697,18 @@
 #define __NR_kcmp			(__NR_Linux + 306)
 #define __NR_finit_module		(__NR_Linux + 307)
 #define __NR_getdents64			(__NR_Linux + 308)
+#define __NR_sched_setattr		(__NR_Linux + 309)
+#define __NR_sched_getattr		(__NR_Linux + 310)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
  */
-#define __NR_Linux_syscalls		308
+#define __NR_Linux_syscalls		310
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux			5000
-#define __NR_64_Linux_syscalls		308
+#define __NR_64_Linux_syscalls		310
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1025,15 +1029,17 @@
 #define __NR_process_vm_writev		(__NR_Linux + 310)
 #define __NR_kcmp			(__NR_Linux + 311)
 #define __NR_finit_module		(__NR_Linux + 312)
+#define __NR_sched_setattr		(__NR_Linux + 313)
+#define __NR_sched_getattr		(__NR_Linux + 314)
 
 /*
  * Offset of the last N32 flavoured syscall
  */
-#define __NR_Linux_syscalls		312
+#define __NR_Linux_syscalls		314
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux			6000
-#define __NR_N32_Linux_syscalls		312
+#define __NR_N32_Linux_syscalls		314
 
 #endif /* _UAPI_ASM_UNISTD_H */

@@ -563,3 +563,5 @@ EXPORT(sys_call_table)
	PTR	sys_process_vm_writev
	PTR	sys_kcmp
	PTR	sys_finit_module
+	PTR	sys_sched_setattr
+	PTR	sys_sched_getattr		/* 4350 */

@@ -425,4 +425,6 @@ EXPORT(sys_call_table)
	PTR	sys_kcmp
	PTR	sys_finit_module
	PTR	sys_getdents64
+	PTR	sys_sched_setattr
+	PTR	sys_sched_getattr		/* 5310 */
	.size	sys_call_table,.-sys_call_table

@@ -418,4 +418,6 @@ EXPORT(sysn32_call_table)
	PTR	compat_sys_process_vm_writev	/* 6310 */
	PTR	sys_kcmp
	PTR	sys_finit_module
+	PTR	sys_sched_setattr
+	PTR	sys_sched_getattr
	.size	sysn32_call_table,.-sysn32_call_table

@@ -541,4 +541,6 @@ EXPORT(sys32_call_table)
	PTR	compat_sys_process_vm_writev
	PTR	sys_kcmp
	PTR	sys_finit_module
+	PTR	sys_sched_setattr
+	PTR	sys_sched_getattr		/* 4350 */
	.size	sys32_call_table,.-sys32_call_table

@@ -33,22 +33,9 @@
 
 int hpux_execve(struct pt_regs *regs)
 {
-	int error;
-	struct filename *filename;
-
-	filename = getname((const char __user *) regs->gr[26]);
-	error = PTR_ERR(filename);
-	if (IS_ERR(filename))
-		goto out;
-
-	error = do_execve(filename->name,
+	return do_execve(getname((const char __user *) regs->gr[26]),
			  (const char __user *const __user *) regs->gr[25],
			  (const char __user *const __user *) regs->gr[24]);
-
-	putname(filename);
-
-out:
-	return error;
 }
 
 struct hpux_dirent {

@@ -25,6 +25,7 @@
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/spinlock.h>
 #include "crypt_s390.h"
 
 #define AES_KEYLEN_128		1
@@ -32,6 +33,7 @@
 #define AES_KEYLEN_256		4
 
 static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
 static char keylen_flag;
 
 struct s390_aes_ctx {
@@ -758,43 +760,67 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
	return aes_set_key(tfm, in_key, key_len);
 }
 
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+	unsigned int i, n;
+
+	/* only use complete blocks, max. PAGE_SIZE */
+	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
+	for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+		memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
+		       AES_BLOCK_SIZE);
+		crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
+	}
+	return n;
+}
+
 static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
			 struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
 {
	int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
-	unsigned int i, n, nbytes;
-	u8 buf[AES_BLOCK_SIZE];
-	u8 *out, *in;
+	unsigned int n, nbytes;
+	u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
+	u8 *out, *in, *ctrptr = ctrbuf;
 
	if (!walk->nbytes)
		return ret;
 
-	memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
+	if (spin_trylock(&ctrblk_lock))
+		ctrptr = ctrblk;
+
+	memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		while (nbytes >= AES_BLOCK_SIZE) {
-			/* only use complete blocks, max. PAGE_SIZE */
-			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
-						   nbytes & ~(AES_BLOCK_SIZE - 1);
-			for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
-				memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
-				       AES_BLOCK_SIZE);
-				crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
-			}
-			ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
-			if (ret < 0 || ret != n)
+			if (ctrptr == ctrblk)
+				n = __ctrblk_init(ctrptr, nbytes);
+			else
+				n = AES_BLOCK_SIZE;
+			ret = crypt_s390_kmctr(func, sctx->key, out, in,
+					       n, ctrptr);
+			if (ret < 0 || ret != n) {
+				if (ctrptr == ctrblk)
+					spin_unlock(&ctrblk_lock);
				return -EIO;
+			}
			if (n > AES_BLOCK_SIZE)
-				memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
+				memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
				       AES_BLOCK_SIZE);
-			crypto_inc(ctrblk, AES_BLOCK_SIZE);
+			crypto_inc(ctrptr, AES_BLOCK_SIZE);
			out += n;
			in += n;
			nbytes -= n;
		}
		ret = blkcipher_walk_done(desc, walk, nbytes);
	}
+	if (ctrptr == ctrblk) {
+		if (nbytes)
+			memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
+		else
+			memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+		spin_unlock(&ctrblk_lock);
	}
	/*
	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
	 */
@@ -802,14 +828,15 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		ret = crypt_s390_kmctr(func, sctx->key, buf, in,
-				       AES_BLOCK_SIZE, ctrblk);
+				       AES_BLOCK_SIZE, ctrbuf);
		if (ret < 0 || ret != AES_BLOCK_SIZE)
			return -EIO;
		memcpy(out, buf, nbytes);
-		crypto_inc(ctrblk, AES_BLOCK_SIZE);
+		crypto_inc(ctrbuf, AES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
+		memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
	}
-	memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
+
	return ret;
 }

@@ -25,6 +25,7 @@
 #define DES3_KEY_SIZE	(3 * DES_KEY_SIZE)
 
 static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
 
 struct s390_des_ctx {
	u8 iv[DES_BLOCK_SIZE];
@@ -105,29 +106,35 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
 }
 
 static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
-			    u8 *iv, struct blkcipher_walk *walk)
+			    struct blkcipher_walk *walk)
 {
+	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	int ret = blkcipher_walk_virt(desc, walk);
	unsigned int nbytes = walk->nbytes;
+	struct {
+		u8 iv[DES_BLOCK_SIZE];
+		u8 key[DES3_KEY_SIZE];
+	} param;
 
	if (!nbytes)
		goto out;
 
-	memcpy(iv, walk->iv, DES_BLOCK_SIZE);
+	memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
+	memcpy(param.key, ctx->key, DES3_KEY_SIZE);
	do {
		/* only use complete blocks */
		unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
		u8 *out = walk->dst.virt.addr;
		u8 *in = walk->src.virt.addr;
 
-		ret = crypt_s390_kmc(func, iv, out, in, n);
+		ret = crypt_s390_kmc(func, &param, out, in, n);
		if (ret < 0 || ret != n)
			return -EIO;
 
		nbytes &= DES_BLOCK_SIZE - 1;
		ret = blkcipher_walk_done(desc, walk, nbytes);
	} while ((nbytes = walk->nbytes));
-	memcpy(walk->iv, iv, DES_BLOCK_SIZE);
+	memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
 
 out:
	return ret;
@@ -179,22 +186,20 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
 {
-	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
 
	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk);
+	return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
 }
 
 static int cbc_des_decrypt(struct blkcipher_desc *desc,
			   struct scatterlist *dst, struct scatterlist *src,
			   unsigned int nbytes)
 {
-	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
 
	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk);
+	return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
 }
 
 static struct crypto_alg cbc_des_alg = {
@@ -327,22 +332,20 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc,
			    struct scatterlist *dst, struct scatterlist *src,
			    unsigned int nbytes)
 {
-	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
 
	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk);
+	return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
 }
 
 static int cbc_des3_decrypt(struct blkcipher_desc *desc,
			    struct scatterlist *dst, struct scatterlist *src,
			    unsigned int nbytes)
 {
-	struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
	struct blkcipher_walk walk;
 
	blkcipher_walk_init(&walk, dst, src, nbytes);
-	return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk);
+	return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
 }
 
 static struct crypto_alg cbc_des3_alg = {
@@ -366,54 +369,80 @@ static struct crypto_alg cbc_des3_alg = {
	}
 };
 
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+	unsigned int i, n;
+
+	/* align to block size, max. PAGE_SIZE */
+	n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
+	for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
+		memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
+		crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
+	}
+	return n;
+}
+
 static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
-			    struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
+			    struct s390_des_ctx *ctx,
+			    struct blkcipher_walk *walk)
 {
	int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
-	unsigned int i, n, nbytes;
-	u8 buf[DES_BLOCK_SIZE];
-	u8 *out, *in;
+	unsigned int n, nbytes;
+	u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
+	u8 *out, *in, *ctrptr = ctrbuf;
 
-	memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
+	if (!walk->nbytes)
+		return ret;
+
+	if (spin_trylock(&ctrblk_lock))
+		ctrptr = ctrblk;
+
+	memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
	while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		while (nbytes >= DES_BLOCK_SIZE) {
-			/* align to block size, max. PAGE_SIZE */
-			n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
-						   nbytes & ~(DES_BLOCK_SIZE - 1);
-			for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
-				memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
-				       DES_BLOCK_SIZE);
-				crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
-			}
-			ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
-			if (ret < 0 || ret != n)
+			if (ctrptr == ctrblk)
+				n = __ctrblk_init(ctrptr, nbytes);
+			else
+				n = DES_BLOCK_SIZE;
+			ret = crypt_s390_kmctr(func, ctx->key, out, in,
+					       n, ctrptr);
+			if (ret < 0 || ret != n) {
+				if (ctrptr == ctrblk)
+					spin_unlock(&ctrblk_lock);
				return -EIO;
+			}
			if (n > DES_BLOCK_SIZE)
-				memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
+				memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
				       DES_BLOCK_SIZE);
-			crypto_inc(ctrblk, DES_BLOCK_SIZE);
+			crypto_inc(ctrptr, DES_BLOCK_SIZE);
			out += n;
			in += n;
			nbytes -= n;
		}
		ret = blkcipher_walk_done(desc, walk, nbytes);
	}
+	if (ctrptr == ctrblk) {
+		if (nbytes)
+			memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
+		else
+			memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
+		spin_unlock(&ctrblk_lock);
+	}
	/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
	if (nbytes) {
		out = walk->dst.virt.addr;
		in = walk->src.virt.addr;
		ret = crypt_s390_kmctr(func, ctx->key, buf, in,
-				       DES_BLOCK_SIZE, ctrblk);
+				       DES_BLOCK_SIZE, ctrbuf);
		if (ret < 0 || ret != DES_BLOCK_SIZE)
			return -EIO;
		memcpy(out, buf, nbytes);
-		crypto_inc(ctrblk, DES_BLOCK_SIZE);
+		crypto_inc(ctrbuf, DES_BLOCK_SIZE);
		ret = blkcipher_walk_done(desc, walk, 0);
+		memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
	}
-	memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
+
	return ret;
 }
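The aes and des CTR changes above share one locking pattern: the preallocated ctrblk page is now guarded by ctrblk_lock, and a caller that loses the trylock race falls back to a small per-call buffer instead of blocking. A minimal C sketch of that pattern with hypothetical names (shared_buf, process, the fallback size); nothing here beyond the trylock/fallback idea is taken from the diffs:

	static u8 *shared_buf;			/* large buffer, preallocated once */
	static DEFINE_SPINLOCK(shared_buf_lock);

	static int process(u8 *data, unsigned int len)
	{
		u8 fallback[16];		/* small per-call buffer (assumed size) */
		u8 *buf = fallback;

		/* Take the big shared buffer only if it is free right now. */
		if (spin_trylock(&shared_buf_lock))
			buf = shared_buf;

		/* ... work on data using buf as scratch space ... */

		if (buf == shared_buf)
			spin_unlock(&shared_buf_lock);
		return 0;
	}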

@@ -444,6 +444,7 @@ config X86_INTEL_MID
	bool "Intel MID platform support"
	depends on X86_32
	depends on X86_EXTENDED_PLATFORM
+	depends on X86_PLATFORM_DEVICES
	depends on PCI
	depends on PCI_GOANY
	depends on X86_IO_APIC
@@ -1051,9 +1052,9 @@ config MICROCODE_INTEL
	  This options enables microcode patch loading support for Intel
	  processors.
 
-	  For latest news and information on obtaining all the required
-	  Intel ingredients for this driver, check:
-	  <http://www.urbanmyth.org/microcode/>.
+	  For the current Intel microcode data package go to
+	  <https://downloadcenter.intel.com> and search for
+	  'Linux Processor Microcode Data File'.
 
 config MICROCODE_AMD
	bool "AMD microcode loading support"

@@ -184,6 +184,7 @@ config HAVE_MMIOTRACE_SUPPORT
 config X86_DECODER_SELFTEST
	bool "x86 instruction decoder selftest"
	depends on DEBUG_KERNEL && KPROBES
+	depends on !COMPILE_TEST
	---help---
	 Perform x86 instruction decoder selftests at build time.
	 This option is useful for checking the sanity of x86 instruction

@@ -19,7 +19,7 @@ extern int amd_cache_northbridges(void);
 extern void amd_flush_garts(void);
 extern int amd_numa_init(void);
 extern int amd_get_subcaches(int);
-extern int amd_set_subcaches(int, int);
+extern int amd_set_subcaches(int, unsigned long);
 
 struct amd_l3_cache {
	unsigned indices;

@@ -62,7 +62,7 @@ static inline void __flush_tlb_all(void)
 
 static inline void __flush_tlb_one(unsigned long addr)
 {
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
	__flush_tlb_single(addr);
 }
 
@@ -93,13 +93,13 @@ static inline void __flush_tlb_one(unsigned long addr)
  */
 static inline void __flush_tlb_up(void)
 {
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();
 }
 
 static inline void flush_tlb_all(void)
 {
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb_all();
 }

@@ -52,8 +52,7 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
 extern int m2p_add_override(unsigned long mfn, struct page *page,
			    struct gnttab_map_grant_ref *kmap_op);
 extern int m2p_remove_override(struct page *page,
-			       struct gnttab_map_grant_ref *kmap_op,
-			       unsigned long mfn);
+			       struct gnttab_map_grant_ref *kmap_op);
 extern struct page *m2p_find_override(unsigned long mfn);
 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
 
@@ -122,7 +121,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
		pfn = m2p_find_override_pfn(mfn, ~0);
	}
 
	/*
	 * pfn is ~0 if there are no entries in the m2p for mfn or if the
	 * entry doesn't map back to the mfn and m2p_override doesn't have a
	 * valid entry for it.

@@ -179,7 +179,7 @@ int amd_get_subcaches(int cpu)
	return (mask >> (4 * cuid)) & 0xf;
 }
 
-int amd_set_subcaches(int cpu, int mask)
+int amd_set_subcaches(int cpu, unsigned long mask)
 {
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));

@@ -767,10 +767,7 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 
 static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
 {
-	tlb_flushall_shift = 5;
-
-	if (c->x86 <= 0x11)
-		tlb_flushall_shift = 4;
+	tlb_flushall_shift = 6;
 }
 
 static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)

@@ -640,21 +640,17 @@ static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
	case 0x61d: /* six-core 45 nm xeon "Dunnington" */
		tlb_flushall_shift = -1;
		break;
+	case 0x63a: /* Ivybridge */
+		tlb_flushall_shift = 2;
+		break;
	case 0x61a: /* 45 nm nehalem, "Bloomfield" */
	case 0x61e: /* 45 nm nehalem, "Lynnfield" */
	case 0x625: /* 32 nm nehalem, "Clarkdale" */
	case 0x62c: /* 32 nm nehalem, "Gulftown" */
	case 0x62e: /* 45 nm nehalem-ex, "Beckton" */
	case 0x62f: /* 32 nm Xeon E7 */
-		tlb_flushall_shift = 6;
-		break;
	case 0x62a: /* SandyBridge */
	case 0x62d: /* SandyBridge, "Romely-EP" */
-		tlb_flushall_shift = 5;
-		break;
-	case 0x63a: /* Ivybridge */
-		tlb_flushall_shift = 1;
-		break;
	default:
		tlb_flushall_shift = 6;
	}

@@ -285,6 +285,15 @@ static void __init collect_cpu_sig_on_bsp(void *arg)
	uci->cpu_sig.sig = cpuid_eax(0x00000001);
 }
 
+static void __init get_bsp_sig(void)
+{
+	unsigned int bsp = boot_cpu_data.cpu_index;
+	struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
+
+	if (!uci->cpu_sig.sig)
+		smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
+}
+
 #else
 void load_ucode_amd_ap(void)
 {
@@ -337,31 +346,37 @@ void load_ucode_amd_ap(void)
 
 int __init save_microcode_in_initrd_amd(void)
 {
+	unsigned long cont;
	enum ucode_state ret;
	u32 eax;
 
-#ifdef CONFIG_X86_32
-	unsigned int bsp = boot_cpu_data.cpu_index;
-	struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
+	if (!container)
+		return -EINVAL;
 
-	if (!uci->cpu_sig.sig)
-		smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
+#ifdef CONFIG_X86_32
+	get_bsp_sig();
+	cont = (unsigned long)container;
+#else
+	/*
+	 * We need the physical address of the container for both bitness since
+	 * boot_params.hdr.ramdisk_image is a physical address.
+	 */
+	cont = __pa(container);
+#endif
 
	/*
-	 * Take into account the fact that the ramdisk might get relocated
-	 * and therefore we need to recompute the container's position in
-	 * virtual memory space.
+	 * Take into account the fact that the ramdisk might get relocated and
+	 * therefore we need to recompute the container's position in virtual
+	 * memory space.
	 */
-	container = (u8 *)(__va((u32)relocated_ramdisk) +
-			   ((u32)container - boot_params.hdr.ramdisk_image));
-#endif
+	if (relocated_ramdisk)
+		container = (u8 *)(__va(relocated_ramdisk) +
+			     (cont - boot_params.hdr.ramdisk_image));
 
	if (ucode_new_rev)
		pr_info("microcode: updated early to new patch_level=0x%08x\n",
			ucode_new_rev);
 
-	if (!container)
-		return -EINVAL;
-
	eax   = cpuid_eax(0x00000001);
	eax   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);

@@ -683,7 +683,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
	}
 
	/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();
 
	/* Save MTRR state */
@@ -697,7 +697,7 @@
 static void post_set(void) __releases(set_atomicity_lock)
 {
	/* Flush TLBs (no need to flush caches - they are disabled) */
-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
	__flush_tlb();
 
	/* Intel (P6) standard MTRRs */

@@ -266,6 +266,14 @@ __visible void smp_trace_x86_platform_ipi(struct pt_regs *regs)
 EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
 
 #ifdef CONFIG_HOTPLUG_CPU
+/* These two declarations are only used in check_irq_vectors_for_cpu_disable()
+ * below, which is protected by stop_machine().  Putting them on the stack
+ * results in a stack frame overflow.  Dynamically allocating could result in a
+ * failure so declare these two cpumasks as global.
+ */
+static struct cpumask affinity_new, online_new;
+
 /*
  * This cpu is going to be removed and its vectors migrated to the remaining
  * online cpus.  Check to see if there are enough vectors in the remaining cpus.
@@ -277,7 +285,6 @@ int check_irq_vectors_for_cpu_disable(void)
	unsigned int this_cpu, vector, this_count, count;
	struct irq_desc *desc;
	struct irq_data *data;
-	struct cpumask affinity_new, online_new;
 
	this_cpu = smp_processor_id();
	cpumask_copy(&online_new, cpu_online_mask);

@@ -571,3 +571,40 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
			quirk_amd_nb_node);
 
 #endif
+
+#ifdef CONFIG_PCI
+/*
+ * Processor does not ensure DRAM scrub read/write sequence
+ * is atomic wrt accesses to CC6 save state area. Therefore
+ * if a concurrent scrub read/write access is to same address
+ * the entry may appear as if it is not written. This quirk
+ * applies to Fam16h models 00h-0Fh
+ *
+ * See "Revision Guide" for AMD F16h models 00h-0fh,
+ * document 51810 rev. 3.04, Nov 2013
+ */
+static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
+{
+	u32 val;
+
+	/*
+	 * Suggested workaround:
+	 * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
+	 */
+	pci_read_config_dword(dev, 0x58, &val);
+	if (val & 0x1F) {
+		val &= ~(0x1F);
+		pci_write_config_dword(dev, 0x58, val);
+	}
+
+	pci_read_config_dword(dev, 0x5C, &val);
+	if (val & BIT(0)) {
+		val &= ~BIT(0);
+		pci_write_config_dword(dev, 0x5c, val);
+	}
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
+			amd_disable_seq_and_redirect_scrub);
+
+#endif

@@ -493,14 +493,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
		struct numa_memblk *mb = &mi->blk[i];
		memblock_set_node(mb->start, mb->end - mb->start,
				  &memblock.memory, mb->nid);
-
-		/*
-		 * At this time, all memory regions reserved by memblock are
-		 * used by the kernel. Set the nid in memblock.reserved will
-		 * mark out all the nodes the kernel resides in.
-		 */
-		memblock_set_node(mb->start, mb->end - mb->start,
-				  &memblock.reserved, mb->nid);
	}
 
	/*
@@ -565,10 +557,21 @@ static void __init numa_init_array(void)
 static void __init numa_clear_kernel_node_hotplug(void)
 {
	int i, nid;
-	nodemask_t numa_kernel_nodes;
+	nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
	unsigned long start, end;
	struct memblock_type *type = &memblock.reserved;
 
+	/*
+	 * At this time, all memory regions reserved by memblock are
+	 * used by the kernel. Set the nid in memblock.reserved will
+	 * mark out all the nodes the kernel resides in.
+	 */
+	for (i = 0; i < numa_meminfo.nr_blks; i++) {
+		struct numa_memblk *mb = &numa_meminfo.blk[i];
+		memblock_set_node(mb->start, mb->end - mb->start,
+				  &memblock.reserved, mb->nid);
+	}
+
	/* Mark all kernel nodes. */
	for (i = 0; i < type->cnt; i++)
		node_set(type->regions[i].nid, numa_kernel_nodes);

@@ -52,6 +52,8 @@ void memory_present(int nid, unsigned long start, unsigned long end)
			nid, start, end);
	printk(KERN_DEBUG "  Setting physnode_map array to node %d for pfns:\n", nid);
	printk(KERN_DEBUG "  ");
+	start = round_down(start, PAGES_PER_SECTION);
+	end = round_up(end, PAGES_PER_SECTION);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		physnode_map[pfn / PAGES_PER_SECTION] = nid;
		printk(KERN_CONT "%lx ", pfn);


@@ -42,15 +42,25 @@ static __init inline int srat_disabled(void)
 	return acpi_numa < 0;
 }

-/* Callback for SLIT parsing */
+/*
+ * Callback for SLIT parsing.  pxm_to_node() returns NUMA_NO_NODE for
+ * I/O localities since SRAT does not list them.  I/O localities are
+ * not supported at this point.
+ */
 void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 {
 	int i, j;

-	for (i = 0; i < slit->locality_count; i++)
-		for (j = 0; j < slit->locality_count; j++)
+	for (i = 0; i < slit->locality_count; i++) {
+		if (pxm_to_node(i) == NUMA_NO_NODE)
+			continue;
+		for (j = 0; j < slit->locality_count; j++) {
+			if (pxm_to_node(j) == NUMA_NO_NODE)
+				continue;
 			numa_set_distance(pxm_to_node(i), pxm_to_node(j),
 				slit->entry[slit->locality_count * i + j]);
+		}
+	}
 }

 /* Callback for Proximity Domain -> x2APIC mapping */
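For reference, the SLIT body is a locality_count x locality_count byte matrix stored row-major, so the distance from domain i to domain j lives at entry[n * i + j]. A sketch of the indexing the loop above relies on:

static u8 slit_distance(struct acpi_table_slit *slit, int i, int j)
{
	/* entries form a row-major n x n byte matrix */
	return slit->entry[slit->locality_count * i + j];
}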


@@ -103,7 +103,7 @@ static void flush_tlb_func(void *info)
 	if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
 		return;

-	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
 		if (f->flush_end == TLB_FLUSH_ALL)
 			local_flush_tlb();
@@ -131,7 +131,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
 	info.flush_start = start;
 	info.flush_end = end;

-	count_vm_event(NR_TLB_REMOTE_FLUSH);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
 	if (is_uv_system()) {
 		unsigned int cpu;
@@ -151,44 +151,19 @@ void flush_tlb_current_task(void)

 	preempt_disable();

-	count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 	local_flush_tlb();
 	if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
 		flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
 	preempt_enable();
 }

-/*
- * It can find out the THP large page, or
- * HUGETLB page in tlb_flush when THP disabled
- */
-static inline unsigned long has_large_page(struct mm_struct *mm,
-					   unsigned long start, unsigned long end)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	unsigned long addr = ALIGN(start, HPAGE_SIZE);
-	for (; addr < end; addr += HPAGE_SIZE) {
-		pgd = pgd_offset(mm, addr);
-		if (likely(!pgd_none(*pgd))) {
-			pud = pud_offset(pgd, addr);
-			if (likely(!pud_none(*pud))) {
-				pmd = pmd_offset(pud, addr);
-				if (likely(!pmd_none(*pmd)))
-					if (pmd_large(*pmd))
-						return addr;
-			}
-		}
-	}
-	return 0;
-}
-
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag)
 {
 	unsigned long addr;
 	unsigned act_entries, tlb_entries = 0;
+	unsigned long nr_base_pages;

 	preempt_disable();
 	if (current->active_mm != mm)
@@ -210,21 +185,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 		tlb_entries = tlb_lli_4k[ENTRIES];
 	else
 		tlb_entries = tlb_lld_4k[ENTRIES];
+
 	/* Assume all of TLB entries was occupied by this task */
-	act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;
+	act_entries = tlb_entries >> tlb_flushall_shift;
+	act_entries = mm->total_vm > act_entries ? act_entries : mm->total_vm;
+	nr_base_pages = (end - start) >> PAGE_SHIFT;

 	/* tlb_flushall_shift is on balance point, details in commit log */
-	if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) {
-		count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+	if (nr_base_pages > act_entries) {
+		count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
 		local_flush_tlb();
 	} else {
-		if (has_large_page(mm, start, end)) {
-			local_flush_tlb();
-			goto flush_all;
-		}
 		/* flush range by one by one 'invlpg' */
 		for (addr = start; addr < end; addr += PAGE_SIZE) {
-			count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
+			count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
 			__flush_tlb_single(addr);
 		}
@@ -262,7 +236,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)

 static void do_flush_tlb_all(void *info)
 {
-	count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
 	__flush_tlb_all();
 	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
 		leave_mm(smp_processor_id());
@@ -270,7 +244,7 @@ static void do_flush_tlb_all(void *info)

 void flush_tlb_all(void)
 {
-	count_vm_event(NR_TLB_REMOTE_FLUSH);
+	count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
 	on_each_cpu(do_flush_tlb_all, NULL, 1);
 }
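The rewritten range flush boils down to one comparison: flush page-by-page only when the range is smaller than the share of TLB entries this task can plausibly hold; otherwise a full flush is cheaper. A standalone sketch of that balance-point test (function and parameter names are illustrative):

static bool want_full_flush(unsigned long nr_base_pages,
			    unsigned long tlb_entries,
			    unsigned long total_vm,
			    unsigned int tlb_flushall_shift)
{
	unsigned long act_entries = tlb_entries >> tlb_flushall_shift;

	if (total_vm < act_entries)	/* small task: cap by its footprint */
		act_entries = total_vm;
	return nr_base_pages > act_entries;	/* true: full flush wins */
}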


@@ -49,7 +49,8 @@ void __init efi_bgrt_init(void)

 	image = efi_lookup_mapped_addr(bgrt_tab->image_address);
 	if (!image) {
-		image = ioremap(bgrt_tab->image_address, sizeof(bmp_header));
+		image = early_memremap(bgrt_tab->image_address,
+				       sizeof(bmp_header));
 		ioremapped = true;
 		if (!image)
 			return;
@@ -57,7 +58,7 @@ void __init efi_bgrt_init(void)

 	memcpy_fromio(&bmp_header, image, sizeof(bmp_header));
 	if (ioremapped)
-		iounmap(image);
+		early_iounmap(image, sizeof(bmp_header));
 	bgrt_image_size = bmp_header.size;

 	bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL);
@@ -65,7 +66,8 @@ void __init efi_bgrt_init(void)
 		return;

 	if (ioremapped) {
-		image = ioremap(bgrt_tab->image_address, bmp_header.size);
+		image = early_memremap(bgrt_tab->image_address,
+				       bmp_header.size);
 		if (!image) {
 			kfree(bgrt_image);
 			bgrt_image = NULL;
@@ -75,5 +77,5 @@ void __init efi_bgrt_init(void)

 	memcpy_fromio(bgrt_image, image, bgrt_image_size);
 	if (ioremapped)
-		iounmap(image);
+		early_iounmap(image, bmp_header.size);
 }


@@ -1473,6 +1473,18 @@ static void xen_pvh_set_cr_flags(int cpu)
 	 * X86_CR0_TS, X86_CR0_PE, X86_CR0_ET are set by Xen for HVM guests
 	 * (which PVH shared codepaths), while X86_CR0_PG is for PVH. */
 	write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM);
+
+	if (!cpu)
+		return;
+	/*
+	 * For BSP, PSE PGE are set in probe_page_size_mask(), for APs
+	 * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu_init.
+	 */
+	if (cpu_has_pse)
+		set_in_cr4(X86_CR4_PSE);
+
+	if (cpu_has_pge)
+		set_in_cr4(X86_CR4_PGE);
 }

 /*


@@ -899,6 +899,13 @@ int m2p_add_override(unsigned long mfn, struct page *page,
 					"m2p_add_override: pfn %lx not mapped", pfn))
 			return -EINVAL;
 	}
+	WARN_ON(PagePrivate(page));
+	SetPagePrivate(page);
+	set_page_private(page, mfn);
+	page->index = pfn_to_mfn(pfn);
+
+	if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
+		return -ENOMEM;

 	if (kmap_op != NULL) {
 		if (!PageHighMem(page)) {
@@ -937,16 +944,19 @@
 }
 EXPORT_SYMBOL_GPL(m2p_add_override);

 int m2p_remove_override(struct page *page,
-			struct gnttab_map_grant_ref *kmap_op,
-			unsigned long mfn)
+			struct gnttab_map_grant_ref *kmap_op)
 {
 	unsigned long flags;
+	unsigned long mfn;
 	unsigned long pfn;
 	unsigned long uninitialized_var(address);
 	unsigned level;
 	pte_t *ptep = NULL;

 	pfn = page_to_pfn(page);
+	mfn = get_phys_to_machine(pfn);
+	if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT))
+		return -EINVAL;

 	if (!PageHighMem(page)) {
 		address = (unsigned long)__va(pfn << PAGE_SHIFT);
@@ -960,7 +970,10 @@ int m2p_remove_override(struct page *page,
 	spin_lock_irqsave(&m2p_override_lock, flags);
 	list_del(&page->lru);
 	spin_unlock_irqrestore(&m2p_override_lock, flags);
+	WARN_ON(!PagePrivate(page));
+	ClearPagePrivate(page);

+	set_phys_to_machine(pfn, page->index);
 	if (kmap_op != NULL) {
 		if (!PageHighMem(page)) {
 			struct multicall_space mcs;

dir.c — 1248 lines; file diff suppressed because it is too large.


@@ -549,7 +549,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
 {
 	unsigned long x;
 	struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
-	if (sscanf(buf, "%ld\n", &x) == 1)
+	if (sscanf(buf, "%lu\n", &x) == 1)
 		battery->alarm = x/1000;
 	if (acpi_battery_present(battery))
 		acpi_battery_set_alarm(battery);
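A one-character fix, but worth spelling out: "%ld" tells sscanf() to store through a signed long pointer, which mismatches the unsigned long destination above. A minimal sketch of the corrected pairing (use_value() is a stand-in for the real consumer):

	unsigned long x;

	if (sscanf(buf, "%lu\n", &x) == 1)	/* "%lu" matches unsigned long */
		use_value(x / 1000);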


@@ -60,7 +60,7 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
 			seq_printf(seq, "%c%-8s  %s:%s\n",
 				   dev->wakeup.flags.run_wake ? '*' : ' ',
 				   (device_may_wakeup(&dev->dev) ||
-				    (ldev && device_may_wakeup(ldev))) ?
+				    device_may_wakeup(ldev)) ?
 				   "enabled" : "disabled",
 				   ldev->bus ? ldev->bus->name :
 				   "no-bus", dev_name(ldev));


@@ -484,7 +484,6 @@ static void acpi_device_hotplug(void *data, u32 src)
 static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
 {
 	u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
-	struct acpi_scan_handler *handler = data;
 	struct acpi_device *adev;
 	acpi_status status;

@@ -500,7 +499,10 @@ static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
 		break;
 	case ACPI_NOTIFY_EJECT_REQUEST:
 		acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
-		if (!handler->hotplug.enabled) {
+		if (!adev->handler)
+			goto err_out;
+
+		if (!adev->handler->hotplug.enabled) {
 			acpi_handle_err(handle, "Eject disabled\n");
 			ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
 			goto err_out;


@@ -99,10 +99,6 @@ acpi_extract_package(union acpi_object *package,

 		union acpi_object *element = &(package->package.elements[i]);

-		if (!element) {
-			return AE_BAD_DATA;
-		}
-
 		switch (element->type) {

 		case ACPI_TYPE_INTEGER:


@@ -170,6 +170,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
 	},
 	{
 	.callback = video_detect_force_vendor,
+	.ident = "HP EliteBook Revolve 810",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook Revolve 810 G1"),
+		},
+	},
+	{
+	.callback = video_detect_force_vendor,
 	.ident = "Lenovo Yoga 13",
 	.matches = {
 		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),

(file diff suppressed because it is too large)


@@ -25,6 +25,7 @@
 #include <linux/bio.h>
 #include <linux/bitops.h>
 #include <linux/blkdev.h>
+#include <linux/compat.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/fs.h>
@@ -3038,6 +3039,152 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
 	return retcode;
 }

+#ifdef CONFIG_COMPAT
+typedef struct sg_io_hdr32 {
+	compat_int_t interface_id;	/* [i] 'S' for SCSI generic (required) */
+	compat_int_t dxfer_direction;	/* [i] data transfer direction */
+	unsigned char cmd_len;		/* [i] SCSI command length ( <= 16 bytes) */
+	unsigned char mx_sb_len;	/* [i] max length to write to sbp */
+	unsigned short iovec_count;	/* [i] 0 implies no scatter gather */
+	compat_uint_t dxfer_len;	/* [i] byte count of data transfer */
+	compat_uint_t dxferp;		/* [i], [*io] points to data transfer memory
+					   or scatter gather list */
+	compat_uptr_t cmdp;		/* [i], [*i] points to command to perform */
+	compat_uptr_t sbp;		/* [i], [*o] points to sense_buffer memory */
+	compat_uint_t timeout;		/* [i] MAX_UINT->no timeout (unit: millisec) */
+	compat_uint_t flags;		/* [i] 0 -> default, see SG_FLAG... */
+	compat_int_t pack_id;		/* [i->o] unused internally (normally) */
+	compat_uptr_t usr_ptr;		/* [i->o] unused internally */
+	unsigned char status;		/* [o] scsi status */
+	unsigned char masked_status;	/* [o] shifted, masked scsi status */
+	unsigned char msg_status;	/* [o] messaging level data (optional) */
+	unsigned char sb_len_wr;	/* [o] byte count actually written to sbp */
+	unsigned short host_status;	/* [o] errors from host adapter */
+	unsigned short driver_status;	/* [o] errors from software driver */
+	compat_int_t resid;		/* [o] dxfer_len - actual_transferred */
+	compat_uint_t duration;		/* [o] time taken by cmd (unit: millisec) */
+	compat_uint_t info;		/* [o] auxiliary information */
+} sg_io_hdr32_t;  /* 64 bytes long (on sparc32) */
+
+typedef struct sg_iovec32 {
+	compat_uint_t iov_base;
+	compat_uint_t iov_len;
+} sg_iovec32_t;
+
+static int sg_build_iovec(sg_io_hdr_t __user *sgio, void __user *dxferp, u16 iovec_count)
+{
+	sg_iovec_t __user *iov = (sg_iovec_t __user *) (sgio + 1);
+	sg_iovec32_t __user *iov32 = dxferp;
+	int i;
+
+	for (i = 0; i < iovec_count; i++) {
+		u32 base, len;
+
+		if (get_user(base, &iov32[i].iov_base) ||
+		    get_user(len, &iov32[i].iov_len) ||
+		    put_user(compat_ptr(base), &iov[i].iov_base) ||
+		    put_user(len, &iov[i].iov_len))
+			return -EFAULT;
+	}
+
+	if (put_user(iov, &sgio->dxferp))
+		return -EFAULT;
+	return 0;
+}
+
+int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg)
+{
+	sg_io_hdr32_t __user *sgio32 = (sg_io_hdr32_t __user *)arg;
+	sg_io_hdr_t __user *sgio;
+	u16 iovec_count;
+	u32 data;
+	void __user *dxferp;
+	int err;
+	int interface_id;
+
+	if (get_user(interface_id, &sgio32->interface_id))
+		return -EFAULT;
+	if (interface_id != 'S')
+		return -EINVAL;
+
+	if (get_user(iovec_count, &sgio32->iovec_count))
+		return -EFAULT;
+
+	{
+		void __user *top = compat_alloc_user_space(0);
+		void __user *new = compat_alloc_user_space(sizeof(sg_io_hdr_t) +
+				(iovec_count * sizeof(sg_iovec_t)));
+		if (new > top)
+			return -EINVAL;
+		sgio = new;
+	}
+
+	/* Ok, now construct.  */
+	if (copy_in_user(&sgio->interface_id, &sgio32->interface_id,
+			 (2 * sizeof(int)) +
+			 (2 * sizeof(unsigned char)) +
+			 (1 * sizeof(unsigned short)) +
+			 (1 * sizeof(unsigned int))))
+		return -EFAULT;
+
+	if (get_user(data, &sgio32->dxferp))
+		return -EFAULT;
+	dxferp = compat_ptr(data);
+	if (iovec_count) {
+		if (sg_build_iovec(sgio, dxferp, iovec_count))
+			return -EFAULT;
+	} else {
+		if (put_user(dxferp, &sgio->dxferp))
+			return -EFAULT;
+	}
+
+	{
+		unsigned char __user *cmdp;
+		unsigned char __user *sbp;
+
+		if (get_user(data, &sgio32->cmdp))
+			return -EFAULT;
+		cmdp = compat_ptr(data);
+
+		if (get_user(data, &sgio32->sbp))
+			return -EFAULT;
+		sbp = compat_ptr(data);
+
+		if (put_user(cmdp, &sgio->cmdp) ||
+		    put_user(sbp, &sgio->sbp))
+			return -EFAULT;
+	}
+
+	if (copy_in_user(&sgio->timeout, &sgio32->timeout,
+			 3 * sizeof(int)))
+		return -EFAULT;
+
+	if (get_user(data, &sgio32->usr_ptr))
+		return -EFAULT;
+	if (put_user(compat_ptr(data), &sgio->usr_ptr))
+		return -EFAULT;
+
+	err = nvme_sg_io(ns, sgio);
+	if (err >= 0) {
+		void __user *datap;
+
+		if (copy_in_user(&sgio32->pack_id, &sgio->pack_id,
+				 sizeof(int)) ||
+		    get_user(datap, &sgio->usr_ptr) ||
+		    put_user((u32)(unsigned long)datap,
+			     &sgio32->usr_ptr) ||
+		    copy_in_user(&sgio32->status, &sgio->status,
+				 (4 * sizeof(unsigned char)) +
+				 (2 * sizeof(unsigned short)) +
+				 (3 * sizeof(int))))
+			err = -EFAULT;
+	}
+
+	return err;
+}
+#endif
+
 int nvme_sg_get_version_num(int __user *ip)
 {
 	return put_user(sg_version_num, ip);
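The compat path above repeats one idiom: read a 32-bit handle from the compat struct, widen it with compat_ptr(), and rebuild the native struct in user-accessible scratch space from compat_alloc_user_space(). A reduced sketch of that idiom with illustrative types (example_hdr32/example_hdr are not kernel structures):

struct example_hdr32 { compat_uptr_t buf; };
struct example_hdr   { void __user *buf; };

static int widen_buf(struct example_hdr __user *native,
		     struct example_hdr32 __user *compat)
{
	compat_uptr_t p;

	if (get_user(p, &compat->buf))		/* fetch 32-bit user pointer */
		return -EFAULT;
	return put_user(compat_ptr(p), &native->buf);	/* store widened form */
}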


@@ -285,7 +285,8 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,

 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
 		    !rb_next(&persistent_gnt->node)) {
-			ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
+			ret = gnttab_unmap_refs(unmap, NULL, pages,
+						segs_to_unmap);
 			BUG_ON(ret);
 			put_free_pages(blkif, pages, segs_to_unmap);
 			segs_to_unmap = 0;
@@ -320,7 +321,8 @@ static void unmap_purged_grants(struct work_struct *work)
 		pages[segs_to_unmap] = persistent_gnt->page;

 		if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-			ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
+			ret = gnttab_unmap_refs(unmap, NULL, pages,
+						segs_to_unmap);
 			BUG_ON(ret);
 			put_free_pages(blkif, pages, segs_to_unmap);
 			segs_to_unmap = 0;
@@ -328,7 +330,7 @@ static void unmap_purged_grants(struct work_struct *work)
 		kfree(persistent_gnt);
 	}
 	if (segs_to_unmap > 0) {
-		ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
+		ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
 		BUG_ON(ret);
 		put_free_pages(blkif, pages, segs_to_unmap);
 	}
@@ -668,14 +670,15 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
 			    GNTMAP_host_map, pages[i]->handle);
 		pages[i]->handle = BLKBACK_INVALID_HANDLE;
 		if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
-			ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
+			ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
+						invcount);
 			BUG_ON(ret);
 			put_free_pages(blkif, unmap_pages, invcount);
 			invcount = 0;
 		}
 	}
 	if (invcount) {
-		ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
+		ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
 		BUG_ON(ret);
 		put_free_pages(blkif, unmap_pages, invcount);
 	}
@@ -737,7 +740,7 @@ again:
 	}

 	if (segs_to_map) {
-		ret = gnttab_map_refs(map, pages_to_gnt, segs_to_map);
+		ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
 		BUG_ON(ret);
 	}


@@ -890,12 +890,10 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 	} else {
 		/* Failback to copying a page */
 		struct page *page = alloc_page(GFP_KERNEL);
-		char *src = buf->ops->map(pipe, buf, 1);
-		char *dst;
+		char *src;

 		if (!page)
 			return -ENOMEM;
-		dst = kmap(page);

 		offset = sd->pos & ~PAGE_MASK;

@@ -903,9 +901,8 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 		if (len + offset > PAGE_SIZE)
 			len = PAGE_SIZE - offset;

-		memcpy(dst + offset, src + buf->offset, len);
-		kunmap(page);
+		src = buf->ops->map(pipe, buf, 1);
+		memcpy(page_address(page) + offset, src + buf->offset, len);
 		buf->ops->unmap(pipe, buf, src);

 		sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);


@@ -57,6 +57,7 @@ struct sample {
 	int32_t core_pct_busy;
 	u64 aperf;
 	u64 mperf;
+	unsigned long long tsc;
 	int freq;
 };

@@ -96,6 +97,7 @@ struct cpudata {

 	u64	prev_aperf;
 	u64	prev_mperf;
+	unsigned long long prev_tsc;
 	int	sample_ptr;
 	struct sample samples[SAMPLE_COUNT];
 };
@@ -548,30 +550,41 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
 					  struct sample *sample)
 {
 	u64 core_pct;
+	u64 c0_pct;

-	core_pct = div64_u64(int_tofp(sample->aperf * 100),
-			     sample->mperf);
-	sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
+	core_pct = div64_u64(sample->aperf * 100, sample->mperf);
+
+	c0_pct = div64_u64(sample->mperf * 100, sample->tsc);
+	sample->freq = fp_toint(
+		mul_fp(int_tofp(cpu->pstate.max_pstate),
+			int_tofp(core_pct * 1000)));

-	sample->core_pct_busy = core_pct;
+	sample->core_pct_busy = mul_fp(int_tofp(core_pct),
+				div_fp(int_tofp(c0_pct + 1), int_tofp(100)));
 }

 static inline void intel_pstate_sample(struct cpudata *cpu)
 {
 	u64 aperf, mperf;
+	unsigned long long tsc;

 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);
+	tsc = native_read_tsc();

 	cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
 	cpu->samples[cpu->sample_ptr].aperf = aperf;
 	cpu->samples[cpu->sample_ptr].mperf = mperf;
+	cpu->samples[cpu->sample_ptr].tsc = tsc;
 	cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
 	cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
+	cpu->samples[cpu->sample_ptr].tsc -= cpu->prev_tsc;

 	intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);

 	cpu->prev_aperf = aperf;
 	cpu->prev_mperf = mperf;
+	cpu->prev_tsc = tsc;
 }

 static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
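Stripped of the driver's fixed-point helpers, the sampled math is two ratios: APERF/MPERF gives the average running frequency relative to maximum, and MPERF/TSC gives the fraction of the wall-clock interval the core spent un-halted (C0). A plain-integer sketch of the combination:

#include <linux/math64.h>

static unsigned int busy_percent(u64 d_aperf, u64 d_mperf, u64 d_tsc)
{
	u64 core_pct = div64_u64(d_aperf * 100, d_mperf);	/* freq ratio */
	u64 c0_pct = div64_u64(d_mperf * 100, d_tsc);		/* time in C0 */

	/* scale the frequency ratio by how often the core was awake */
	return (unsigned int)div64_u64(core_pct * c0_pct, 100);
}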


@@ -65,7 +65,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
 	 * then the BO is being moved and we should
 	 * store up the damage until later.
 	 */
-	if (!drm_can_sleep())
+	if (drm_can_sleep())
 		ret = ast_bo_reserve(bo, true);
 	if (ret) {
 		if (ret != -EBUSY)
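The same one-character fix repeats in the cirrus and mgag200 hunks below: drm_can_sleep() returns true when blocking is allowed, so the reservation must happen on the true branch; the old '!' skipped it exactly when waiting was safe. A sketch of the intended control flow (the -EBUSY default is illustrative):

	int ret = -EBUSY;			/* default: behave as if busy */

	if (drm_can_sleep())			/* process context: may block */
		ret = ast_bo_reserve(bo, true);	/* wait while the BO moves */
	if (ret)
		return;				/* store up the damage until later */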


@@ -39,7 +39,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
 	 * then the BO is being moved and we should
 	 * store up the damage until later.
 	 */
-	if (!drm_can_sleep())
+	if (drm_can_sleep())
 		ret = cirrus_bo_reserve(bo, true);
 	if (ret) {
 		if (ret != -EBUSY)


@@ -41,7 +41,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
 	 * then the BO is being moved and we should
 	 * store up the damage until later.
 	 */
-	if (!drm_can_sleep())
+	if (drm_can_sleep())
 		ret = mgag200_bo_reserve(bo, true);
 	if (ret) {
 		if (ret != -EBUSY)


@@ -1519,11 +1519,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
 		   (mga_vga_calculate_mode_bandwidth(mode, bpp)
 		    > (32700 * 1024))) {
 		return MODE_BANDWIDTH;
-	} else if (mode->type == G200_EH &&
+	} else if (mdev->type == G200_EH &&
 		   (mga_vga_calculate_mode_bandwidth(mode, bpp)
 		    > (37500 * 1024))) {
 		return MODE_BANDWIDTH;
-	} else if (mode->type == G200_ER &&
+	} else if (mdev->type == G200_ER &&
 		   (mga_vga_calculate_mode_bandwidth(mode,
 			bpp) > (55000 * 1024))) {
 		return MODE_BANDWIDTH;


@@ -1007,8 +1007,22 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
 	case R_008C64_SQ_VSTMP_RING_SIZE:
 	case R_0288C8_SQ_GS_VERT_ITEMSIZE:
 		/* get value to populate the IB don't remove */
-		tmp =radeon_get_ib_value(p, idx);
-		ib[idx] = 0;
+		/*tmp =radeon_get_ib_value(p, idx);
+		ib[idx] = 0;*/
+		break;
+	case SQ_ESGS_RING_BASE:
+	case SQ_GSVS_RING_BASE:
+	case SQ_ESTMP_RING_BASE:
+	case SQ_GSTMP_RING_BASE:
+	case SQ_PSTMP_RING_BASE:
+	case SQ_VSTMP_RING_BASE:
+		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+		if (r) {
+			dev_warn(p->dev, "bad SET_CONTEXT_REG "
+					"0x%04X\n", reg);
+			return -EINVAL;
+		}
+		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
 		break;
 	case SQ_CONFIG:
 		track->sq_config = radeon_get_ib_value(p, idx);


@@ -78,9 +78,10 @@
 * 2.34.0 - Add CIK tiling mode array query
 * 2.35.0 - Add CIK macrotile mode array query
 * 2.36.0 - Fix CIK DCE tiling setup
+ * 2.37.0 - allow GS ring setup on r6xx/r7xx
 */
 #define KMS_DRIVER_MAJOR	2
-#define KMS_DRIVER_MINOR	36
+#define KMS_DRIVER_MINOR	37
 #define KMS_DRIVER_PATCHLEVEL	0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);


@@ -18,6 +18,7 @@ r600 0x9400
 0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
 0x00028A40 VGT_GS_MODE
 0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028B38 VGT_GS_MAX_VERT_OUT
 0x000088C8 VGT_GS_PER_ES
 0x000088E8 VGT_GS_PER_VS
 0x000088D4 VGT_GS_VERTEX_REUSE


@@ -292,7 +292,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,

 		if (ret == 0) {
 			ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
-			if (!kref_get_unless_zero(&ref->kref)) {
+			if (kref_get_unless_zero(&ref->kref)) {
 				rcu_read_unlock();
 				break;
 			}
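kref_get_unless_zero() takes the reference only while the count is still nonzero, which is what makes lookups under RCU safe against a concurrent final kref_put(); with the inversion removed, the loop now exits only once a reference was actually obtained. The generic shape of the pattern, with illustrative types and lookup helper:

static struct obj *get_obj(struct obj_table *tbl, u32 key)
{
	struct obj *obj;

	rcu_read_lock();
	obj = lookup_obj(tbl, key);		/* illustrative lookup */
	if (obj && !kref_get_unless_zero(&obj->kref))
		obj = NULL;	/* object mid-teardown: treat as not found */
	rcu_read_unlock();
	return obj;		/* caller owns a reference, or NULL */
}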


@@ -380,6 +380,9 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
 	pgoff_t i;
 	struct page **page = ttm->pages;

+	if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+		return;
+
 	for (i = 0; i < ttm->num_pages; ++i) {
 		(*page)->mapping = NULL;
 		(*page++)->index = 0;


@@ -2583,4 +2583,28 @@ typedef union {
 	float  f;
 } SVGA3dDevCapResult;

+typedef enum {
+	SVGA3DCAPS_RECORD_UNKNOWN        = 0,
+	SVGA3DCAPS_RECORD_DEVCAPS_MIN    = 0x100,
+	SVGA3DCAPS_RECORD_DEVCAPS        = 0x100,
+	SVGA3DCAPS_RECORD_DEVCAPS_MAX    = 0x1ff,
+} SVGA3dCapsRecordType;
+
+typedef
+struct SVGA3dCapsRecordHeader {
+	uint32 length;
+	SVGA3dCapsRecordType type;
+}
+SVGA3dCapsRecordHeader;
+
+typedef
+struct SVGA3dCapsRecord {
+	SVGA3dCapsRecordHeader header;
+	uint32 data[1];
+}
+SVGA3dCapsRecord;
+
+typedef uint32 SVGA3dCapPair[2];
+
 #endif /* _SVGA3D_REG_H_ */


@@ -37,7 +37,7 @@ struct vmw_user_context {

-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);

 static void vmw_user_context_free(struct vmw_resource *res);
 static struct vmw_resource *
@@ -50,9 +50,11 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 				 bool readback,
 				 struct ttm_validate_buffer *val_buf);
 static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					   bool rebind);
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
 static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);

 static uint64_t vmw_user_context_size;
@@ -111,10 +113,14 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)

 	if (res->func->destroy == vmw_gb_context_destroy) {
 		mutex_lock(&dev_priv->cmdbuf_mutex);
+		mutex_lock(&dev_priv->binding_mutex);
+		(void) vmw_context_binding_state_kill
+			(&container_of(res, struct vmw_user_context, res)->cbs);
 		(void) vmw_gb_context_destroy(res);
 		if (dev_priv->pinned_bo != NULL &&
 		    !dev_priv->query_cid_valid)
 			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+		mutex_unlock(&dev_priv->binding_mutex);
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
 		return;
 	}
@@ -328,7 +334,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
 	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

 	mutex_lock(&dev_priv->binding_mutex);
-	vmw_context_binding_state_kill(&uctx->cbs);
+	vmw_context_binding_state_scrub(&uctx->cbs);

 	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
@@ -378,10 +384,6 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
 		SVGA3dCmdHeader header;
 		SVGA3dCmdDestroyGBContext body;
 	} *cmd;
-	struct vmw_user_context *uctx =
-		container_of(res, struct vmw_user_context, res);
-
-	BUG_ON(!list_empty(&uctx->cbs.list));

 	if (likely(res->id == -1))
 		return 0;
@@ -528,8 +530,9 @@ out_unlock:
 * vmw_context_scrub_shader - scrub a shader binding from a context.
 *
 * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
 */
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
 {
 	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 	struct {
@@ -548,7 +551,8 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = bi->ctx->id;
 	cmd->body.type = bi->i1.shader_type;
-	cmd->body.shid = SVGA3D_INVALID_ID;
+	cmd->body.shid =
+		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));

 	return 0;
@@ -559,8 +563,10 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
 * from a context.
 *
 * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
 */
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+					   bool rebind)
 {
 	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 	struct {
@@ -579,7 +585,8 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.cid = bi->ctx->id;
 	cmd->body.type = bi->i1.rt_type;
-	cmd->body.target.sid = SVGA3D_INVALID_ID;
+	cmd->body.target.sid =
+		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 	cmd->body.target.face = 0;
 	cmd->body.target.mipmap = 0;
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -591,11 +598,13 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
 * vmw_context_scrub_texture - scrub a texture binding from a context.
 *
 * @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of scrub command.
 *
 * TODO: Possibly complement this function with a function that takes
 * a list of texture bindings and combines them to a single command.
 */
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
+				     bool rebind)
 {
 	struct vmw_private *dev_priv = bi->ctx->dev_priv;
 	struct {
@@ -619,7 +628,8 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
 	cmd->body.c.cid = bi->ctx->id;
 	cmd->body.s1.stage = bi->i1.texture_stage;
 	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
-	cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
+	cmd->body.s1.value =
+		cpu_to_le32((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));

 	return 0;
@@ -692,6 +702,7 @@ int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
 		vmw_context_binding_drop(loc);

 	loc->bi = *bi;
+	loc->bi.scrubbed = false;
 	list_add_tail(&loc->ctx_list, &cbs->list);
 	INIT_LIST_HEAD(&loc->res_list);
@@ -727,12 +738,11 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
 	if (loc->bi.ctx != NULL)
 		vmw_context_binding_drop(loc);

-	loc->bi = *bi;
-	list_add_tail(&loc->ctx_list, &cbs->list);
-	if (bi->res != NULL)
+	if (bi->res != NULL) {
+		loc->bi = *bi;
+		list_add_tail(&loc->ctx_list, &cbs->list);
 		list_add_tail(&loc->res_list, &bi->res->binding_head);
-	else
-		INIT_LIST_HEAD(&loc->res_list);
+	}
 }

 /**
@@ -746,7 +756,10 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
 */
 static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
 {
-	(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
+	if (!cb->bi.scrubbed) {
+		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
+		cb->bi.scrubbed = true;
+	}
 	vmw_context_binding_drop(cb);
 }

@@ -767,6 +780,27 @@ static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
 		vmw_context_binding_kill(entry);
 }

+/**
+ * vmw_context_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+	struct vmw_ctx_binding *entry;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (!entry->bi.scrubbed) {
+			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+			entry->bi.scrubbed = true;
+		}
+	}
+}
+
 /**
 * vmw_context_binding_res_list_kill - Kill all bindings on a
 * resource binding list
@@ -784,6 +818,27 @@ void vmw_context_binding_res_list_kill(struct list_head *head)
 		vmw_context_binding_kill(entry);
 }

+/**
+ * vmw_context_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_context_binding_res_list_scrub(struct list_head *head)
+{
+	struct vmw_ctx_binding *entry;
+
+	list_for_each_entry(entry, head, res_list) {
+		if (!entry->bi.scrubbed) {
+			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+			entry->bi.scrubbed = true;
+		}
+	}
+}
+
 /**
 * vmw_context_binding_state_transfer - Commit staged binding info
 *
@@ -803,3 +858,50 @@ void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
 	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
 		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
 }
+
+/**
+ * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @ctx: The context resource
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_context_rebind_all(struct vmw_resource *ctx)
+{
+	struct vmw_ctx_binding *entry;
+	struct vmw_user_context *uctx =
+		container_of(ctx, struct vmw_user_context, res);
+	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
+	int ret;
+
+	list_for_each_entry(entry, &cbs->list, ctx_list) {
+		if (likely(!entry->bi.scrubbed))
+			continue;
+
+		if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
+			    SVGA3D_INVALID_ID))
+			continue;
+
+		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
+		if (unlikely(ret != 0))
+			return ret;
+
+		entry->bi.scrubbed = false;
+	}
+
+	return 0;
+}
+
+/**
+ * vmw_context_binding_list - Return a list of context bindings
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+{
+	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
+}


@@ -941,6 +941,7 @@ static void vmw_postclose(struct drm_device *dev,
 		drm_master_put(&vmw_fp->locked_master);
 	}

+	vmw_compat_shader_man_destroy(vmw_fp->shman);
 	ttm_object_file_release(&vmw_fp->tfile);
 	kfree(vmw_fp);
 }
@@ -960,11 +961,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 	if (unlikely(vmw_fp->tfile == NULL))
 		goto out_no_tfile;

+	vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
+	if (IS_ERR(vmw_fp->shman))
+		goto out_no_shman;
+
 	file_priv->driver_priv = vmw_fp;
 	dev_priv->bdev.dev_mapping = dev->dev_mapping;

 	return 0;

+out_no_shman:
+	ttm_object_file_release(&vmw_fp->tfile);
 out_no_tfile:
 	kfree(vmw_fp);
 	return ret;


@@ -75,10 +75,14 @@
 #define VMW_RES_FENCE ttm_driver_type3
 #define VMW_RES_SHADER ttm_driver_type4

+struct vmw_compat_shader_manager;
+
 struct vmw_fpriv {
 	struct drm_master *locked_master;
 	struct ttm_object_file *tfile;
 	struct list_head fence_events;
+	bool gb_aware;
+	struct vmw_compat_shader_manager *shman;
 };

 struct vmw_dma_buffer {
@@ -272,6 +276,7 @@ struct vmw_ctx_bindinfo {
 	struct vmw_resource *ctx;
 	struct vmw_resource *res;
 	enum vmw_ctx_binding_type bt;
+	bool scrubbed;
 	union {
 		SVGA3dShaderType shader_type;
 		SVGA3dRenderTargetType rt_type;
@@ -318,7 +323,7 @@ struct vmw_sw_context{
 	struct drm_open_hash res_ht;
 	bool res_ht_initialized;
 	bool kernel; /**< is the called made from the kernel */
-	struct ttm_object_file *tfile;
+	struct vmw_fpriv *fp;
 	struct list_head validate_nodes;
 	struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
 	uint32_t cur_reloc;
@@ -336,6 +341,7 @@ struct vmw_sw_context{
 	bool needs_post_query_barrier;
 	struct vmw_resource *error_resource;
 	struct vmw_ctx_binding_state staged_bindings;
+	struct list_head staged_shaders;
 };

 struct vmw_legacy_display;
@@ -569,6 +575,8 @@ struct vmw_user_resource_conv;

 extern void vmw_resource_unreference(struct vmw_resource **p_res);
 extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res);
 extern int vmw_resource_validate(struct vmw_resource *res);
 extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
 extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
@@ -957,6 +965,9 @@ extern void
 vmw_context_binding_state_transfer(struct vmw_resource *res,
 				   struct vmw_ctx_binding_state *cbs);
 extern void vmw_context_binding_res_list_kill(struct list_head *head);
+extern void vmw_context_binding_res_list_scrub(struct list_head *head);
+extern int vmw_context_rebind_all(struct vmw_resource *ctx);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);

 /*
 * Surface management - vmwgfx_surface.c
@@ -991,6 +1002,28 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file_priv);
 extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
 				    struct drm_file *file_priv);
+extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+				    SVGA3dShaderType shader_type,
+				    u32 *user_key);
+extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+				      struct list_head *list);
+extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+				      struct list_head *list);
+extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+				    u32 user_key,
+				    SVGA3dShaderType shader_type,
+				    struct list_head *list);
+extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+				 u32 user_key, const void *bytecode,
+				 SVGA3dShaderType shader_type,
+				 size_t size,
+				 struct ttm_object_file *tfile,
+				 struct list_head *list);
+extern struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv);
+extern void
+vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);

 /**
 * Inline helper functions

@ -114,8 +114,10 @@ static void vmw_resource_list_unreserve(struct list_head *list,
* persistent context binding tracker. * persistent context binding tracker.
*/ */
if (unlikely(val->staged_bindings)) { if (unlikely(val->staged_bindings)) {
vmw_context_binding_state_transfer if (!backoff) {
(val->res, val->staged_bindings); vmw_context_binding_state_transfer
(val->res, val->staged_bindings);
}
kfree(val->staged_bindings); kfree(val->staged_bindings);
val->staged_bindings = NULL; val->staged_bindings = NULL;
} }
@ -177,6 +179,44 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
return 0; return 0;
} }
/**
* vmw_resource_context_res_add - Put resources previously bound to a context on
* the validation list
*
* @dev_priv: Pointer to a device private structure
* @sw_context: Pointer to a software context used for this command submission
* @ctx: Pointer to the context resource
*
* This function puts all resources that were previously bound to @ctx on
* the resource validation list. This is part of the context state reemission
*/
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
struct vmw_resource *ctx)
{
struct list_head *binding_list;
struct vmw_ctx_binding *entry;
int ret = 0;
struct vmw_resource *res;
mutex_lock(&dev_priv->binding_mutex);
binding_list = vmw_context_binding_list(ctx);
list_for_each_entry(entry, binding_list, ctx_list) {
res = vmw_resource_reference_unless_doomed(entry->bi.res);
if (unlikely(res == NULL))
continue;
ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
vmw_resource_unreference(&res);
if (unlikely(ret != 0))
break;
}
mutex_unlock(&dev_priv->binding_mutex);
return ret;
}
/** /**
* vmw_resource_relocation_add - Add a relocation to the relocation list * vmw_resource_relocation_add - Add a relocation to the relocation list
* *
@ -233,8 +273,12 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
{ {
struct vmw_resource_relocation *rel; struct vmw_resource_relocation *rel;
list_for_each_entry(rel, list, head) list_for_each_entry(rel, list, head) {
cb[rel->offset] = rel->res->id; if (likely(rel->res != NULL))
cb[rel->offset] = rel->res->id;
else
cb[rel->offset] = SVGA_3D_CMD_NOP;
}
} }
static int vmw_cmd_invalid(struct vmw_private *dev_priv, static int vmw_cmd_invalid(struct vmw_private *dev_priv,
@ -379,22 +423,27 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
} }
/** /**
* vmw_cmd_res_check - Check that a resource is present and if so, put it * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
* on the resource validate list unless it's already there. * on the resource validate list unless it's already there.
* *
* @dev_priv: Pointer to a device private structure. * @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context. * @sw_context: Pointer to the software context.
* @res_type: Resource type. * @res_type: Resource type.
* @converter: User-space visisble type specific information. * @converter: User-space visisble type specific information.
* @id: Pointer to the location in the command buffer currently being * @id: user-space resource id handle.
* @id_loc: Pointer to the location in the command buffer currently being
* parsed from where the user-space resource id handle is located. * parsed from where the user-space resource id handle is located.
* @p_val: Pointer to pointer to resource validalidation node. Populated
* on exit.
*/ */
static int vmw_cmd_res_check(struct vmw_private *dev_priv, static int
struct vmw_sw_context *sw_context, vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
enum vmw_res_type res_type, struct vmw_sw_context *sw_context,
const struct vmw_user_resource_conv *converter, enum vmw_res_type res_type,
uint32_t *id, const struct vmw_user_resource_conv *converter,
struct vmw_resource_val_node **p_val) uint32_t id,
uint32_t *id_loc,
struct vmw_resource_val_node **p_val)
{ {
struct vmw_res_cache_entry *rcache = struct vmw_res_cache_entry *rcache =
&sw_context->res_cache[res_type]; &sw_context->res_cache[res_type];
@ -402,7 +451,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_resource_val_node *node; struct vmw_resource_val_node *node;
int ret; int ret;
if (*id == SVGA3D_INVALID_ID) { if (id == SVGA3D_INVALID_ID) {
if (p_val) if (p_val)
*p_val = NULL; *p_val = NULL;
if (res_type == vmw_res_context) { if (res_type == vmw_res_context) {
@ -417,7 +466,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
* resource * resource
*/ */
if (likely(rcache->valid && *id == rcache->handle)) { if (likely(rcache->valid && id == rcache->handle)) {
const struct vmw_resource *res = rcache->res; const struct vmw_resource *res = rcache->res;
rcache->node->first_usage = false; rcache->node->first_usage = false;
@ -426,28 +475,28 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
return vmw_resource_relocation_add return vmw_resource_relocation_add
(&sw_context->res_relocations, res, (&sw_context->res_relocations, res,
id - sw_context->buf_start); id_loc - sw_context->buf_start);
} }
ret = vmw_user_resource_lookup_handle(dev_priv, ret = vmw_user_resource_lookup_handle(dev_priv,
sw_context->tfile, sw_context->fp->tfile,
*id, id,
converter, converter,
&res); &res);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use resource 0x%08x.\n", DRM_ERROR("Could not find or use resource 0x%08x.\n",
(unsigned) *id); (unsigned) id);
dump_stack(); dump_stack();
return ret; return ret;
} }
rcache->valid = true; rcache->valid = true;
rcache->res = res; rcache->res = res;
rcache->handle = *id; rcache->handle = id;
ret = vmw_resource_relocation_add(&sw_context->res_relocations, ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res, res,
id - sw_context->buf_start); id_loc - sw_context->buf_start);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_no_reloc; goto out_no_reloc;
@ -459,7 +508,11 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
if (p_val) if (p_val)
*p_val = node; *p_val = node;
if (node->first_usage && res_type == vmw_res_context) { if (dev_priv->has_mob && node->first_usage &&
res_type == vmw_res_context) {
ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
if (unlikely(ret != 0))
goto out_no_reloc;
node->staged_bindings = node->staged_bindings =
kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
if (node->staged_bindings == NULL) { if (node->staged_bindings == NULL) {
@ -480,6 +533,59 @@ out_no_reloc:
return ret; return ret;
} }
/**
* vmw_cmd_res_check - Check that a resource is present and if so, put it
* on the resource validate list unless it's already there.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
* @converter: User-space visisble type specific information.
* @id_loc: Pointer to the location in the command buffer currently being
* parsed from where the user-space resource id handle is located.
* @p_val: Pointer to pointer to resource validalidation node. Populated
* on exit.
*/
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
enum vmw_res_type res_type,
const struct vmw_user_resource_conv *converter,
uint32_t *id_loc,
struct vmw_resource_val_node **p_val)
{
return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
converter, *id_loc, id_loc, p_val);
}
/**
* vmw_rebind_contexts - Rebind all resources previously bound to
* referenced contexts.
*
* @sw_context: Pointer to the software context.
*
* Rebind context binding points that have been scrubbed because of eviction.
*/
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
struct vmw_resource_val_node *val;
int ret;
list_for_each_entry(val, &sw_context->resource_list, head) {
if (likely(!val->staged_bindings))
continue;
ret = vmw_context_rebind_all(val->res);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Failed to rebind context.\n");
return ret;
}
}
return 0;
}
/** /**
* vmw_cmd_cid_check - Check a command header for valid context information. * vmw_cmd_cid_check - Check a command header for valid context information.
* *
@ -767,7 +873,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc; struct vmw_relocation *reloc;
int ret; int ret;
ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use MOB buffer.\n"); DRM_ERROR("Could not find or use MOB buffer.\n");
return -EINVAL; return -EINVAL;
@ -828,7 +934,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc; struct vmw_relocation *reloc;
int ret; int ret;
ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n"); DRM_ERROR("Could not find or use GMR region.\n");
return -EINVAL; return -EINVAL;
@ -1127,7 +1233,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res); srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header); vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
header);
out_no_surface: out_no_surface:
vmw_dmabuf_unreference(&vmw_bo); vmw_dmabuf_unreference(&vmw_bo);
@ -1478,6 +1585,98 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
&cmd->body.sid, NULL); &cmd->body.sid, NULL);
} }
/**
* vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
* command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_shader_define_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdDefineShader body;
} *cmd;
int ret;
size_t size;
cmd = container_of(header, struct vmw_shader_define_cmd,
header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
NULL);
if (unlikely(ret != 0))
return ret;
if (unlikely(!dev_priv->has_mob))
return 0;
size = cmd->header.size - sizeof(cmd->body);
ret = vmw_compat_shader_add(sw_context->fp->shman,
cmd->body.shid, cmd + 1,
cmd->body.type, size,
sw_context->fp->tfile,
&sw_context->staged_shaders);
if (unlikely(ret != 0))
return ret;
return vmw_resource_relocation_add(&sw_context->res_relocations,
NULL, &cmd->header.id -
sw_context->buf_start);
}
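The NULL-resource relocation queued above is what neutralizes the legacy command at submission time; a minimal sketch of the apply-side semantics assumed here (mirroring vmw_resource_relocation_apply(), not part of this hunk):

/* Sketch: a relocation with a NULL resource patches the command id
 * to a NOP, so the device never executes the legacy SHADER_DEFINE
 * body that was converted into a compat shader above. */
static void apply_reloc_sketch(u32 *cmd_buf,
			       const struct vmw_resource_relocation *rel)
{
	if (rel->res != NULL)
		cmd_buf[rel->offset] = rel->res->id;
	else
		cmd_buf[rel->offset] = SVGA_3D_CMD_NOP;
}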
/**
* vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
* command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_shader_destroy_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdDestroyShader body;
} *cmd;
int ret;
cmd = container_of(header, struct vmw_shader_destroy_cmd,
header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
NULL);
if (unlikely(ret != 0))
return ret;
if (unlikely(!dev_priv->has_mob))
return 0;
ret = vmw_compat_shader_remove(sw_context->fp->shman,
cmd->body.shid,
cmd->body.type,
&sw_context->staged_shaders);
if (unlikely(ret != 0))
return ret;
return vmw_resource_relocation_add(&sw_context->res_relocations,
NULL, &cmd->header.id -
sw_context->buf_start);
}
/** /**
* vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
* command * command
@ -1509,10 +1708,18 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
if (dev_priv->has_mob) { if (dev_priv->has_mob) {
struct vmw_ctx_bindinfo bi; struct vmw_ctx_bindinfo bi;
struct vmw_resource_val_node *res_node; struct vmw_resource_val_node *res_node;
u32 shid = cmd->body.shid;
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader, if (shid != SVGA3D_INVALID_ID)
user_shader_converter, (void) vmw_compat_shader_lookup(sw_context->fp->shman,
&cmd->body.shid, &res_node); cmd->body.type,
&shid);
ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
vmw_res_shader,
user_shader_converter,
shid,
&cmd->body.shid, &res_node);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; return ret;
@ -1526,6 +1733,39 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
return 0; return 0;
} }
/**
* vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
* command
*
* @dev_priv: Pointer to a device private struct.
* @sw_context: The software context being used for this batch.
* @header: Pointer to the command header in the command stream.
*/
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_set_shader_const_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSetShaderConst body;
} *cmd;
int ret;
cmd = container_of(header, struct vmw_set_shader_const_cmd,
header);
ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
user_context_converter, &cmd->body.cid,
NULL);
if (unlikely(ret != 0))
return ret;
if (dev_priv->has_mob)
header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
return 0;
}
/** /**
* vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
* command * command
@ -1634,14 +1874,14 @@ static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
true, false, false), true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check, VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
false, false, false), false, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check, VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
true, true, false), true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check, VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
true, true, false), true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader, VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
true, false, false), true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check, VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
true, true, false), true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw, VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
true, false, false), true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check, VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
@ -2171,7 +2411,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
} else } else
sw_context->kernel = true; sw_context->kernel = true;
sw_context->tfile = vmw_fpriv(file_priv)->tfile; sw_context->fp = vmw_fpriv(file_priv);
sw_context->cur_reloc = 0; sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0; sw_context->cur_val_buf = 0;
sw_context->fence_flags = 0; sw_context->fence_flags = 0;
@ -2188,16 +2428,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_unlock; goto out_unlock;
sw_context->res_ht_initialized = true; sw_context->res_ht_initialized = true;
} }
INIT_LIST_HEAD(&sw_context->staged_shaders);
INIT_LIST_HEAD(&resource_list); INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size); command_size);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_err; goto out_err_nores;
ret = vmw_resources_reserve(sw_context); ret = vmw_resources_reserve(sw_context);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
goto out_err; goto out_err_nores;
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes); ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
@ -2225,6 +2466,12 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_err; goto out_err;
} }
if (dev_priv->has_mob) {
ret = vmw_rebind_contexts(sw_context);
if (unlikely(ret != 0))
goto out_err;
}
cmd = vmw_fifo_reserve(dev_priv, command_size); cmd = vmw_fifo_reserve(dev_priv, command_size);
if (unlikely(cmd == NULL)) { if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving fifo space for commands.\n"); DRM_ERROR("Failed reserving fifo space for commands.\n");
@ -2276,6 +2523,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
} }
list_splice_init(&sw_context->resource_list, &resource_list); list_splice_init(&sw_context->resource_list, &resource_list);
vmw_compat_shaders_commit(sw_context->fp->shman,
&sw_context->staged_shaders);
mutex_unlock(&dev_priv->cmdbuf_mutex); mutex_unlock(&dev_priv->cmdbuf_mutex);
/* /*
@ -2289,10 +2538,11 @@ int vmw_execbuf_process(struct drm_file *file_priv,
out_unlock_binding: out_unlock_binding:
mutex_unlock(&dev_priv->binding_mutex); mutex_unlock(&dev_priv->binding_mutex);
out_err: out_err:
ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
vmw_resource_list_unreserve(&sw_context->resource_list, true);
vmw_resource_relocations_free(&sw_context->res_relocations); vmw_resource_relocations_free(&sw_context->res_relocations);
vmw_free_relocations(sw_context); vmw_free_relocations(sw_context);
ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
vmw_resource_list_unreserve(&sw_context->resource_list, true);
vmw_clear_validations(sw_context); vmw_clear_validations(sw_context);
if (unlikely(dev_priv->pinned_bo != NULL && if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid)) !dev_priv->query_cid_valid))
@ -2301,6 +2551,8 @@ out_unlock:
list_splice_init(&sw_context->resource_list, &resource_list); list_splice_init(&sw_context->resource_list, &resource_list);
error_resource = sw_context->error_resource; error_resource = sw_context->error_resource;
sw_context->error_resource = NULL; sw_context->error_resource = NULL;
vmw_compat_shaders_revert(sw_context->fp->shman,
&sw_context->staged_shaders);
mutex_unlock(&dev_priv->cmdbuf_mutex); mutex_unlock(&dev_priv->cmdbuf_mutex);
/* /*

View file

@ -29,12 +29,18 @@
#include <drm/vmwgfx_drm.h> #include <drm/vmwgfx_drm.h>
#include "vmwgfx_kms.h" #include "vmwgfx_kms.h"
struct svga_3d_compat_cap {
SVGA3dCapsRecordHeader header;
SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
};
int vmw_getparam_ioctl(struct drm_device *dev, void *data, int vmw_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file_priv)
{ {
struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_getparam_arg *param = struct drm_vmw_getparam_arg *param =
(struct drm_vmw_getparam_arg *)data; (struct drm_vmw_getparam_arg *)data;
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
switch (param->param) { switch (param->param) {
case DRM_VMW_PARAM_NUM_STREAMS: case DRM_VMW_PARAM_NUM_STREAMS:
@ -60,6 +66,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
__le32 __iomem *fifo_mem = dev_priv->mmio_virt; __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
const struct vmw_fifo_state *fifo = &dev_priv->fifo; const struct vmw_fifo_state *fifo = &dev_priv->fifo;
if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
param->value = SVGA3D_HWVERSION_WS8_B1;
break;
}
param->value = param->value =
ioread32(fifo_mem + ioread32(fifo_mem +
((fifo->capabilities & ((fifo->capabilities &
@ -69,17 +80,26 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
break; break;
} }
case DRM_VMW_PARAM_MAX_SURF_MEMORY: case DRM_VMW_PARAM_MAX_SURF_MEMORY:
param->value = dev_priv->memory_size; if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
!vmw_fp->gb_aware)
param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
else
param->value = dev_priv->memory_size;
break; break;
case DRM_VMW_PARAM_3D_CAPS_SIZE: case DRM_VMW_PARAM_3D_CAPS_SIZE:
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
param->value = SVGA3D_DEVCAP_MAX; vmw_fp->gb_aware)
param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
param->value = sizeof(struct svga_3d_compat_cap) +
sizeof(uint32_t);
else else
param->value = (SVGA_FIFO_3D_CAPS_LAST - param->value = (SVGA_FIFO_3D_CAPS_LAST -
SVGA_FIFO_3D_CAPS + 1); SVGA_FIFO_3D_CAPS + 1) *
param->value *= sizeof(uint32_t); sizeof(uint32_t);
break; break;
case DRM_VMW_PARAM_MAX_MOB_MEMORY: case DRM_VMW_PARAM_MAX_MOB_MEMORY:
vmw_fp->gb_aware = true;
param->value = dev_priv->max_mob_pages * PAGE_SIZE; param->value = dev_priv->max_mob_pages * PAGE_SIZE;
break; break;
default: default:
@ -91,6 +111,38 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
return 0; return 0;
} }
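The size reported for DRM_VMW_PARAM_3D_CAPS_SIZE is what a client passes back as the bounce-buffer size for DRM_VMW_GET_3D_CAP; a minimal userspace sketch of that two-step flow, assuming libdrm's drmCommandWriteRead()/drmCommandWrite() and the UAPI structs from vmwgfx_drm.h (error handling elided):

#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

/* Query the caps blob size first, then fetch the blob itself. */
static void *fetch_3d_caps(int fd, uint32_t *size_out)
{
	struct drm_vmw_getparam_arg gp = { .param = DRM_VMW_PARAM_3D_CAPS_SIZE };
	struct drm_vmw_get_3d_cap_arg cap = { 0 };
	void *buf;

	drmCommandWriteRead(fd, DRM_VMW_GETPARAM, &gp, sizeof(gp));
	buf = calloc(1, gp.value);

	cap.buffer = (uintptr_t)buf;
	cap.max_size = (uint32_t)gp.value;
	drmCommandWrite(fd, DRM_VMW_GET_3D_CAP, &cap, sizeof(cap));

	*size_out = (uint32_t)gp.value;
	return buf;
}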
static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
size_t size)
{
struct svga_3d_compat_cap *compat_cap =
(struct svga_3d_compat_cap *) bounce;
unsigned int i;
size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
unsigned int max_size;
if (size < pair_offset)
return -EINVAL;
max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);
if (max_size > SVGA3D_DEVCAP_MAX)
max_size = SVGA3D_DEVCAP_MAX;
compat_cap->header.length =
(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
mutex_lock(&dev_priv->hw_mutex);
for (i = 0; i < max_size; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
compat_cap->pairs[i][0] = i;
compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
}
mutex_unlock(&dev_priv->hw_mutex);
return 0;
}
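For clients that are not gb-aware, the bounce buffer filled above holds a single caps record whose length field counts 32-bit words including the header; a hedged sketch of a consumer walking it (the mirrored struct is an assumption about the reader, not driver code):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Assumed mirror of svga_3d_compat_cap above: a two-dword header
 * followed by index/value pairs of u32s. */
struct compat_cap_blob {
	struct { uint32_t length; uint32_t type; } header;
	uint32_t pairs[][2];
};

static void walk_compat_caps(const void *bounce)
{
	const struct compat_cap_blob *rec = bounce;
	uint32_t hdr_dwords = offsetof(struct compat_cap_blob, pairs) /
			      sizeof(uint32_t);
	uint32_t npairs = (rec->header.length - hdr_dwords) / 2;
	uint32_t i;

	for (i = 0; i < npairs; ++i)
		printf("devcap %u = 0x%08x\n",
		       rec->pairs[i][0], rec->pairs[i][1]);
}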
int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file_priv)
@ -104,41 +156,49 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
void *bounce; void *bounce;
int ret; int ret;
bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS); bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
if (unlikely(arg->pad64 != 0)) { if (unlikely(arg->pad64 != 0)) {
DRM_ERROR("Illegal GET_3D_CAP argument.\n"); DRM_ERROR("Illegal GET_3D_CAP argument.\n");
return -EINVAL; return -EINVAL;
} }
if (gb_objects) if (gb_objects && vmw_fp->gb_aware)
size = SVGA3D_DEVCAP_MAX; size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
else if (gb_objects)
size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
else else
size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1); size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
sizeof(uint32_t);
size *= sizeof(uint32_t);
if (arg->max_size < size) if (arg->max_size < size)
size = arg->max_size; size = arg->max_size;
bounce = vmalloc(size); bounce = vzalloc(size);
if (unlikely(bounce == NULL)) { if (unlikely(bounce == NULL)) {
DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n"); DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
return -ENOMEM; return -ENOMEM;
} }
if (gb_objects) { if (gb_objects && vmw_fp->gb_aware) {
int i; int i, num;
uint32_t *bounce32 = (uint32_t *) bounce; uint32_t *bounce32 = (uint32_t *) bounce;
num = size / sizeof(uint32_t);
if (num > SVGA3D_DEVCAP_MAX)
num = SVGA3D_DEVCAP_MAX;
mutex_lock(&dev_priv->hw_mutex); mutex_lock(&dev_priv->hw_mutex);
for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) { for (i = 0; i < num; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i); vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP); *bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
} }
mutex_unlock(&dev_priv->hw_mutex); mutex_unlock(&dev_priv->hw_mutex);
} else if (gb_objects) {
ret = vmw_fill_compat_cap(dev_priv, bounce, size);
if (unlikely(ret != 0))
goto out_err;
} else { } else {
fifo_mem = dev_priv->mmio_virt; fifo_mem = dev_priv->mmio_virt;
memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
} }
@ -146,6 +206,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
ret = copy_to_user(buffer, bounce, size); ret = copy_to_user(buffer, bounce, size);
if (ret) if (ret)
ret = -EFAULT; ret = -EFAULT;
out_err:
vfree(bounce); vfree(bounce);
if (unlikely(ret != 0)) if (unlikely(ret != 0))

View file

@ -134,6 +134,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) { if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for OTable setup.\n"); DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
ret = -ENOMEM;
goto out_no_fifo; goto out_no_fifo;
} }

View file

@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
return res; return res;
} }
struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
return kref_get_unless_zero(&res->kref) ? res : NULL;
}
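A hedged sketch of the intended use of vmw_resource_reference_unless_doomed(): scanning a list that the release path also prunes, under the same lock, without reviving a resource whose final unreference is already in flight (the list argument is illustrative):

/* Sketch only: kref_get_unless_zero() fails once the refcount hits
 * zero, so a resource already being destroyed is skipped, never
 * resurrected. Caller is assumed to hold the lock the release path
 * takes (binding_mutex in the hunks below). */
static struct vmw_resource *first_live_resource(struct list_head *bindings)
{
	struct vmw_resource *res;

	list_for_each_entry(res, bindings, binding_head) {
		struct vmw_resource *ref =
			vmw_resource_reference_unless_doomed(res);

		if (ref)
			return ref;	/* caller must vmw_resource_unreference() */
	}
	return NULL;
}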
/** /**
* vmw_resource_release_id - release a resource id to the id manager. * vmw_resource_release_id - release a resource id to the id manager.
@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref)
vmw_dmabuf_unreference(&res->backup); vmw_dmabuf_unreference(&res->backup);
} }
if (likely(res->hw_destroy != NULL)) if (likely(res->hw_destroy != NULL)) {
res->hw_destroy(res); res->hw_destroy(res);
mutex_lock(&dev_priv->binding_mutex);
vmw_context_binding_res_list_kill(&res->binding_head);
mutex_unlock(&dev_priv->binding_mutex);
}
id = res->id; id = res->id;
if (res->res_free != NULL) if (res->res_free != NULL)

View file

@ -29,6 +29,8 @@
#include "vmwgfx_resource_priv.h" #include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h" #include "ttm/ttm_placement.h"
#define VMW_COMPAT_SHADER_HT_ORDER 12
struct vmw_shader { struct vmw_shader {
struct vmw_resource res; struct vmw_resource res;
SVGA3dShaderType type; SVGA3dShaderType type;
@ -40,6 +42,50 @@ struct vmw_user_shader {
struct vmw_shader shader; struct vmw_shader shader;
}; };
/**
* enum vmw_compat_shader_state - Staging state for compat shaders
*/
enum vmw_compat_shader_state {
VMW_COMPAT_COMMITED,
VMW_COMPAT_ADD,
VMW_COMPAT_DEL
};
/**
* struct vmw_compat_shader - Metadata for compat shaders.
*
* @handle: The TTM handle of the guest-backed shader.
* @tfile: The struct ttm_object_file the guest-backed shader is registered
* with.
* @hash: Hash item for lookup.
* @head: List head for staging lists or the compat shader manager list.
* @state: Staging state.
*
* The structure is protected by the cmdbuf lock.
*/
struct vmw_compat_shader {
u32 handle;
struct ttm_object_file *tfile;
struct drm_hash_item hash;
struct list_head head;
enum vmw_compat_shader_state state;
};
/**
* struct vmw_compat_shader_manager - Compat shader manager.
*
* @shaders: Hash table containing staged and committed compat shaders
* @list: List of committed shaders.
* @dev_priv: Pointer to a device private structure.
*
* @shaders and @list are protected by the cmdbuf mutex for now.
*/
struct vmw_compat_shader_manager {
struct drm_open_hash shaders;
struct list_head list;
struct vmw_private *dev_priv;
};
static void vmw_user_shader_free(struct vmw_resource *res); static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource * static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base); vmw_user_shader_base_to_res(struct ttm_base_object *base);
@ -258,7 +304,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
return 0; return 0;
mutex_lock(&dev_priv->binding_mutex); mutex_lock(&dev_priv->binding_mutex);
vmw_context_binding_res_list_kill(&res->binding_head); vmw_context_binding_res_list_scrub(&res->binding_head);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) { if (unlikely(cmd == NULL)) {
@ -325,13 +371,81 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
TTM_REF_USAGE); TTM_REF_USAGE);
} }
int vmw_shader_alloc(struct vmw_private *dev_priv,
struct vmw_dma_buffer *buffer,
size_t shader_size,
size_t offset,
SVGA3dShaderType shader_type,
struct ttm_object_file *tfile,
u32 *handle)
{
struct vmw_user_shader *ushader;
struct vmw_resource *res, *tmp;
int ret;
/*
* Approximate idr memory usage with 128 bytes. It will be limited
* by maximum number of shaders anyway.
*/
if (unlikely(vmw_user_shader_size == 0))
vmw_user_shader_size =
ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_shader_size,
false, true);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for shader "
"creation.\n");
goto out;
}
ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
if (unlikely(ushader == NULL)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_shader_size);
ret = -ENOMEM;
goto out;
}
res = &ushader->shader.res;
ushader->base.shareable = false;
ushader->base.tfile = NULL;
/*
* From here on, the destructor takes over resource freeing.
*/
ret = vmw_gb_shader_init(dev_priv, res, shader_size,
offset, shader_type, buffer,
vmw_user_shader_free);
if (unlikely(ret != 0))
goto out;
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &ushader->base, false,
VMW_RES_SHADER,
&vmw_user_shader_base_release, NULL);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
goto out_err;
}
if (handle)
*handle = ushader->base.hash.key;
out_err:
vmw_resource_unreference(&res);
out:
return ret;
}
int vmw_shader_define_ioctl(struct drm_device *dev, void *data, int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv) struct drm_file *file_priv)
{ {
struct vmw_private *dev_priv = vmw_priv(dev); struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_user_shader *ushader;
struct vmw_resource *res;
struct vmw_resource *tmp;
struct drm_vmw_shader_create_arg *arg = struct drm_vmw_shader_create_arg *arg =
(struct drm_vmw_shader_create_arg *)data; (struct drm_vmw_shader_create_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@ -373,69 +487,324 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
goto out_bad_arg; goto out_bad_arg;
} }
/*
* Approximate idr memory usage with 128 bytes. It will be limited
* by maximum number of shaders anyway.
*/
if (unlikely(vmw_user_shader_size == 0))
vmw_user_shader_size = ttm_round_pot(sizeof(*ushader))
+ 128;
ret = ttm_read_lock(&vmaster->lock, true); ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0)) if (unlikely(ret != 0))
return ret; goto out_bad_arg;
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
vmw_user_shader_size, shader_type, tfile, &arg->shader_handle);
false, true);
if (unlikely(ret != 0)) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for shader"
" creation.\n");
goto out_unlock;
}
ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
if (unlikely(ushader == NULL)) {
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_shader_size);
ret = -ENOMEM;
goto out_unlock;
}
res = &ushader->shader.res;
ushader->base.shareable = false;
ushader->base.tfile = NULL;
/*
* From here on, the destructor takes over resource freeing.
*/
ret = vmw_gb_shader_init(dev_priv, res, arg->size,
arg->offset, shader_type, buffer,
vmw_user_shader_free);
if (unlikely(ret != 0))
goto out_unlock;
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &ushader->base, false,
VMW_RES_SHADER,
&vmw_user_shader_base_release, NULL);
if (unlikely(ret != 0)) {
vmw_resource_unreference(&tmp);
goto out_err;
}
arg->shader_handle = ushader->base.hash.key;
out_err:
vmw_resource_unreference(&res);
out_unlock:
ttm_read_unlock(&vmaster->lock); ttm_read_unlock(&vmaster->lock);
out_bad_arg: out_bad_arg:
vmw_dmabuf_unreference(&buffer); vmw_dmabuf_unreference(&buffer);
return ret; return ret;
}
/**
* vmw_compat_shader_lookup - Look up a compat shader
*
* @man: Pointer to the compat shader manager.
* @shader_type: The shader type, that combined with the user_key identifies
* the shader.
* @user_key: On entry, this should be a pointer to the user_key.
* On successful exit, it will contain the guest-backed shader's TTM handle.
*
* Returns 0 on success. Non-zero on failure, in which case the value pointed
* to by @user_key is unmodified.
*/
int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
SVGA3dShaderType shader_type,
u32 *user_key)
{
struct drm_hash_item *hash;
int ret;
unsigned long key = *user_key | (shader_type << 24);
ret = drm_ht_find_item(&man->shaders, key, &hash);
if (unlikely(ret != 0))
return ret;
*user_key = drm_hash_entry(hash, struct vmw_compat_shader,
hash)->handle;
return 0;
}
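Lookup, add and remove all derive the same hash key; a small sketch of the packing (the 24-bit bound on user_key is enforced in vmw_compat_shader_add()):

/* Key layout (sketch): bits 0..23 carry the legacy user key, bits
 * 24+ carry the SVGA3dShaderType, so vertex and pixel shaders with
 * the same user key never collide in one hash table. */
static unsigned long compat_shader_key(u32 user_key,
				       SVGA3dShaderType shader_type)
{
	return user_key | (shader_type << 24);
}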
/**
* vmw_compat_shader_free - Free a compat shader.
*
* @man: Pointer to the compat shader manager.
* @entry: Pointer to a struct vmw_compat_shader.
*
* Frees a struct vmw_compat_shader entry and drops its reference to the
* guest-backed shader.
*/
static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
struct vmw_compat_shader *entry)
{
list_del(&entry->head);
WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
TTM_REF_USAGE));
kfree(entry);
}
/**
* vmw_compat_shaders_commit - Commit a list of compat shader actions.
*
* @man: Pointer to the compat shader manager.
* @list: Caller's list of compat shader actions.
*
* This function commits a list of compat shader additions or removals.
* It is typically called when the execbuf ioctl call triggering these
* actions has committed the fifo contents to the device.
*/
void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
struct list_head *list)
{
struct vmw_compat_shader *entry, *next;
list_for_each_entry_safe(entry, next, list, head) {
list_del(&entry->head);
switch (entry->state) {
case VMW_COMPAT_ADD:
entry->state = VMW_COMPAT_COMMITED;
list_add_tail(&entry->head, &man->list);
break;
case VMW_COMPAT_DEL:
ttm_ref_object_base_unref(entry->tfile, entry->handle,
TTM_REF_USAGE);
kfree(entry);
break;
default:
BUG();
break;
}
}
}
/**
* vmw_compat_shaders_revert - Revert a list of compat shader actions
*
* @man: Pointer to the compat shader manager.
* @list: Caller's list of compat shader actions.
*
* This function reverts a list of compat shader additions or removals.
* It is typically called when the execbuf ioctl call triggering these
* actions failed for some reason, and the command stream was never
* submitted.
*/
void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
struct list_head *list)
{
struct vmw_compat_shader *entry, *next;
int ret;
list_for_each_entry_safe(entry, next, list, head) {
switch (entry->state) {
case VMW_COMPAT_ADD:
vmw_compat_shader_free(man, entry);
break;
case VMW_COMPAT_DEL:
ret = drm_ht_insert_item(&man->shaders, &entry->hash);
list_del(&entry->head);
list_add_tail(&entry->head, &man->list);
entry->state = VMW_COMPAT_COMMITED;
break;
default:
BUG();
break;
}
}
}
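Commit and revert are the two possible endings of one staging transaction; a hedged sketch of the calling pattern (stage_and_submit() is hypothetical):

/* Sketch of the staging protocol: actions accumulate on a local
 * list and end in exactly one of commit (fifo submitted) or revert
 * (submission failed). */
static int submit_with_staging(struct vmw_compat_shader_manager *man)
{
	struct list_head staged;
	int ret;

	INIT_LIST_HEAD(&staged);
	ret = stage_and_submit(man, &staged);	/* hypothetical */
	if (ret == 0)
		vmw_compat_shaders_commit(man, &staged);
	else
		vmw_compat_shaders_revert(man, &staged);
	return ret;
}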
/**
* vmw_compat_shader_remove - Stage a compat shader for removal.
*
* @man: Pointer to the compat shader manager
* @user_key: The key that is used to identify the shader. The key is
* unique to the shader type.
* @shader_type: Shader type.
* @list: Caller's list of staged shader actions.
*
* This function stages a compat shader for removal and removes the key from
* the shader manager's hash table. If the shader was previously only staged
* for addition it is completely removed (but the execbuf code may keep a
* reference if it was bound to a context between addition and removal). If
* it was previously committed to the manager, it is staged for removal.
*/
int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
u32 user_key, SVGA3dShaderType shader_type,
struct list_head *list)
{
struct vmw_compat_shader *entry;
struct drm_hash_item *hash;
int ret;
ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
&hash);
if (likely(ret != 0))
return -EINVAL;
entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
switch (entry->state) {
case VMW_COMPAT_ADD:
vmw_compat_shader_free(man, entry);
break;
case VMW_COMPAT_COMMITED:
(void) drm_ht_remove_item(&man->shaders, &entry->hash);
list_del(&entry->head);
entry->state = VMW_COMPAT_DEL;
list_add_tail(&entry->head, list);
break;
default:
BUG();
break;
}
return 0;
}
/**
* vmw_compat_shader_add - Create a compat shader and add the
* key to the manager
*
* @man: Pointer to the compat shader manager
* @user_key: The key that is used to identify the shader. The key is
* unique to the shader type.
* @bytecode: Pointer to the bytecode of the shader.
* @shader_type: Shader type.
* @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
* to be created with.
* @list: Caller's list of staged shader actions.
*
* Note that only the key is added to the shader manager's hash table.
* The shader is not yet added to the shader manager's list of shaders.
*/
int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
u32 user_key, const void *bytecode,
SVGA3dShaderType shader_type,
size_t size,
struct ttm_object_file *tfile,
struct list_head *list)
{
struct vmw_dma_buffer *buf;
struct ttm_bo_kmap_obj map;
bool is_iomem;
struct vmw_compat_shader *compat;
u32 handle;
int ret;
if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
return -EINVAL;
/* Allocate and pin a DMA buffer */
buf = kzalloc(sizeof(*buf), GFP_KERNEL);
if (unlikely(buf == NULL))
return -ENOMEM;
ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
true, vmw_dmabuf_bo_free);
if (unlikely(ret != 0))
goto out;
ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
if (unlikely(ret != 0))
goto no_reserve;
/* Map and copy shader bytecode. */
ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
&map);
if (unlikely(ret != 0)) {
ttm_bo_unreserve(&buf->base);
goto no_reserve;
}
memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
WARN_ON(is_iomem);
ttm_bo_kunmap(&map);
ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
WARN_ON(ret != 0);
ttm_bo_unreserve(&buf->base);
/* Create a guest-backed shader container backed by the dma buffer */
ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
tfile, &handle);
vmw_dmabuf_unreference(&buf);
if (unlikely(ret != 0))
goto no_reserve;
/*
* Create a compat shader structure and stage it for insertion
* in the manager
*/
compat = kzalloc(sizeof(*compat), GFP_KERNEL);
if (compat == NULL) {
ret = -ENOMEM;
goto no_compat;
}
compat->hash.key = user_key | (shader_type << 24);
ret = drm_ht_insert_item(&man->shaders, &compat->hash);
if (unlikely(ret != 0))
goto out_invalid_key;
compat->state = VMW_COMPAT_ADD;
compat->handle = handle;
compat->tfile = tfile;
list_add_tail(&compat->head, list);
return 0;
out_invalid_key:
kfree(compat);
no_compat:
ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
no_reserve:
out:
return ret;
}
/**
* vmw_compat_shader_man_create - Create a compat shader manager
*
* @dev_priv: Pointer to a device private structure.
*
* Typically done at file open time. If successful returns a pointer to a
* compat shader manager. Otherwise returns an error pointer.
*/
struct vmw_compat_shader_manager *
vmw_compat_shader_man_create(struct vmw_private *dev_priv)
{
struct vmw_compat_shader_manager *man;
int ret;
man = kzalloc(sizeof(*man), GFP_KERNEL);
if (man == NULL)
return ERR_PTR(-ENOMEM);
man->dev_priv = dev_priv;
INIT_LIST_HEAD(&man->list);
ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
if (ret == 0)
return man;
kfree(man);
return ERR_PTR(ret);
}
/**
* vmw_compat_shader_man_destroy - Destroy a compat shader manager
*
* @man: Pointer to the shader manager to destroy.
*
* Typically done at file close time.
*/
void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
{
struct vmw_compat_shader *entry, *next;
mutex_lock(&man->dev_priv->cmdbuf_mutex);
list_for_each_entry_safe(entry, next, &man->list, head)
vmw_compat_shader_free(man, entry);
mutex_unlock(&man->dev_priv->cmdbuf_mutex);
kfree(man);
} }

View file

@ -908,8 +908,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
rep->size_addr; rep->size_addr;
if (user_sizes) if (user_sizes)
ret = copy_to_user(user_sizes, srf->sizes, ret = copy_to_user(user_sizes, &srf->base_size,
srf->num_sizes * sizeof(*srf->sizes)); sizeof(srf->base_size));
if (unlikely(ret != 0)) { if (unlikely(ret != 0)) {
DRM_ERROR("copy_to_user failed %p %u\n", DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes); user_sizes, srf->num_sizes);
@ -1111,7 +1111,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
return 0; return 0;
mutex_lock(&dev_priv->binding_mutex); mutex_lock(&dev_priv->binding_mutex);
vmw_context_binding_res_list_kill(&res->binding_head); vmw_context_binding_res_list_scrub(&res->binding_head);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) { if (unlikely(cmd == NULL)) {

View file

@ -278,10 +278,6 @@ static int da9055_hwmon_probe(struct platform_device *pdev)
if (hwmon_irq < 0) if (hwmon_irq < 0)
return hwmon_irq; return hwmon_irq;
hwmon_irq = regmap_irq_get_virq(hwmon->da9055->irq_data, hwmon_irq);
if (hwmon_irq < 0)
return hwmon_irq;
ret = devm_request_threaded_irq(&pdev->dev, hwmon_irq, ret = devm_request_threaded_irq(&pdev->dev, hwmon_irq,
NULL, da9055_auxadc_irq, NULL, da9055_auxadc_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,

View file

@ -90,7 +90,8 @@ struct pmbus_data {
u32 flags; /* from platform data */ u32 flags; /* from platform data */
int exponent; /* linear mode: exponent for output voltages */ int exponent[PMBUS_PAGES];
/* linear mode: exponent for output voltages */
const struct pmbus_driver_info *info; const struct pmbus_driver_info *info;
@ -410,7 +411,7 @@ static long pmbus_reg2data_linear(struct pmbus_data *data,
long val; long val;
if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 */ if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 */
exponent = data->exponent; exponent = data->exponent[sensor->page];
mantissa = (u16) sensor->data; mantissa = (u16) sensor->data;
} else { /* LINEAR11 */ } else { /* LINEAR11 */
exponent = ((s16)sensor->data) >> 11; exponent = ((s16)sensor->data) >> 11;
@ -516,7 +517,7 @@ static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
#define MIN_MANTISSA (511 * 1000) #define MIN_MANTISSA (511 * 1000)
static u16 pmbus_data2reg_linear(struct pmbus_data *data, static u16 pmbus_data2reg_linear(struct pmbus_data *data,
enum pmbus_sensor_classes class, long val) struct pmbus_sensor *sensor, long val)
{ {
s16 exponent = 0, mantissa; s16 exponent = 0, mantissa;
bool negative = false; bool negative = false;
@ -525,7 +526,7 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
if (val == 0) if (val == 0)
return 0; return 0;
if (class == PSC_VOLTAGE_OUT) { if (sensor->class == PSC_VOLTAGE_OUT) {
/* LINEAR16 does not support negative voltages */ /* LINEAR16 does not support negative voltages */
if (val < 0) if (val < 0)
return 0; return 0;
@ -534,10 +535,10 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
* For a static exponents, we don't have a choice * For a static exponents, we don't have a choice
* but to adjust the value to it. * but to adjust the value to it.
*/ */
if (data->exponent < 0) if (data->exponent[sensor->page] < 0)
val <<= -data->exponent; val <<= -data->exponent[sensor->page];
else else
val >>= data->exponent; val >>= data->exponent[sensor->page];
val = DIV_ROUND_CLOSEST(val, 1000); val = DIV_ROUND_CLOSEST(val, 1000);
return val & 0xffff; return val & 0xffff;
} }
@ -548,14 +549,14 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
} }
/* Power is in uW. Convert to mW before converting. */ /* Power is in uW. Convert to mW before converting. */
if (class == PSC_POWER) if (sensor->class == PSC_POWER)
val = DIV_ROUND_CLOSEST(val, 1000L); val = DIV_ROUND_CLOSEST(val, 1000L);
/* /*
* For simplicity, convert fan data to milli-units * For simplicity, convert fan data to milli-units
* before calculating the exponent. * before calculating the exponent.
*/ */
if (class == PSC_FAN) if (sensor->class == PSC_FAN)
val = val * 1000; val = val * 1000;
/* Reduce large mantissa until it fits into 10 bit */ /* Reduce large mantissa until it fits into 10 bit */
@ -585,22 +586,22 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
} }
static u16 pmbus_data2reg_direct(struct pmbus_data *data, static u16 pmbus_data2reg_direct(struct pmbus_data *data,
enum pmbus_sensor_classes class, long val) struct pmbus_sensor *sensor, long val)
{ {
long m, b, R; long m, b, R;
m = data->info->m[class]; m = data->info->m[sensor->class];
b = data->info->b[class]; b = data->info->b[sensor->class];
R = data->info->R[class]; R = data->info->R[sensor->class];
/* Power is in uW. Adjust R and b. */ /* Power is in uW. Adjust R and b. */
if (class == PSC_POWER) { if (sensor->class == PSC_POWER) {
R -= 3; R -= 3;
b *= 1000; b *= 1000;
} }
/* Calculate Y = (m * X + b) * 10^R */ /* Calculate Y = (m * X + b) * 10^R */
if (class != PSC_FAN) { if (sensor->class != PSC_FAN) {
R -= 3; /* Adjust R and b for data in milli-units */ R -= 3; /* Adjust R and b for data in milli-units */
b *= 1000; b *= 1000;
} }
@ -619,7 +620,7 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
} }
static u16 pmbus_data2reg_vid(struct pmbus_data *data, static u16 pmbus_data2reg_vid(struct pmbus_data *data,
enum pmbus_sensor_classes class, long val) struct pmbus_sensor *sensor, long val)
{ {
val = clamp_val(val, 500, 1600); val = clamp_val(val, 500, 1600);
@ -627,20 +628,20 @@ static u16 pmbus_data2reg_vid(struct pmbus_data *data,
} }
static u16 pmbus_data2reg(struct pmbus_data *data, static u16 pmbus_data2reg(struct pmbus_data *data,
enum pmbus_sensor_classes class, long val) struct pmbus_sensor *sensor, long val)
{ {
u16 regval; u16 regval;
switch (data->info->format[class]) { switch (data->info->format[sensor->class]) {
case direct: case direct:
regval = pmbus_data2reg_direct(data, class, val); regval = pmbus_data2reg_direct(data, sensor, val);
break; break;
case vid: case vid:
regval = pmbus_data2reg_vid(data, class, val); regval = pmbus_data2reg_vid(data, sensor, val);
break; break;
case linear: case linear:
default: default:
regval = pmbus_data2reg_linear(data, class, val); regval = pmbus_data2reg_linear(data, sensor, val);
break; break;
} }
return regval; return regval;
@ -746,7 +747,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
return -EINVAL; return -EINVAL;
mutex_lock(&data->update_lock); mutex_lock(&data->update_lock);
regval = pmbus_data2reg(data, sensor->class, val); regval = pmbus_data2reg(data, sensor, val);
ret = _pmbus_write_word_data(client, sensor->page, sensor->reg, regval); ret = _pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
if (ret < 0) if (ret < 0)
rv = ret; rv = ret;
@ -1643,12 +1644,13 @@ static int pmbus_find_attributes(struct i2c_client *client,
* This function is called for all chips. * This function is called for all chips.
*/ */
static int pmbus_identify_common(struct i2c_client *client, static int pmbus_identify_common(struct i2c_client *client,
struct pmbus_data *data) struct pmbus_data *data, int page)
{ {
int vout_mode = -1; int vout_mode = -1;
if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) if (pmbus_check_byte_register(client, page, PMBUS_VOUT_MODE))
vout_mode = _pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE); vout_mode = _pmbus_read_byte_data(client, page,
PMBUS_VOUT_MODE);
if (vout_mode >= 0 && vout_mode != 0xff) { if (vout_mode >= 0 && vout_mode != 0xff) {
/* /*
* Not all chips support the VOUT_MODE command, * Not all chips support the VOUT_MODE command,
@ -1659,7 +1661,7 @@ static int pmbus_identify_common(struct i2c_client *client,
if (data->info->format[PSC_VOLTAGE_OUT] != linear) if (data->info->format[PSC_VOLTAGE_OUT] != linear)
return -ENODEV; return -ENODEV;
data->exponent = ((s8)(vout_mode << 3)) >> 3; data->exponent[page] = ((s8)(vout_mode << 3)) >> 3;
break; break;
case 1: /* VID mode */ case 1: /* VID mode */
if (data->info->format[PSC_VOLTAGE_OUT] != vid) if (data->info->format[PSC_VOLTAGE_OUT] != vid)
@ -1674,7 +1676,7 @@ static int pmbus_identify_common(struct i2c_client *client,
} }
} }
pmbus_clear_fault_page(client, 0); pmbus_clear_fault_page(client, page);
return 0; return 0;
} }
@ -1682,7 +1684,7 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
struct pmbus_driver_info *info) struct pmbus_driver_info *info)
{ {
struct device *dev = &client->dev; struct device *dev = &client->dev;
int ret; int page, ret;
/* /*
* Some PMBus chips don't support PMBUS_STATUS_BYTE, so try * Some PMBus chips don't support PMBUS_STATUS_BYTE, so try
@ -1715,10 +1717,12 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
return -ENODEV; return -ENODEV;
} }
ret = pmbus_identify_common(client, data); for (page = 0; page < info->pages; page++) {
if (ret < 0) { ret = pmbus_identify_common(client, data, page);
dev_err(dev, "Failed to identify chip capabilities\n"); if (ret < 0) {
return ret; dev_err(dev, "Failed to identify chip capabilities\n");
return ret;
}
} }
return 0; return 0;
} }
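The per-page exponent introduced above only affects LINEAR16 readings; a compact sketch of the two linear encodings being decoded (mirroring the reg2data logic above, without the driver's unit scaling):

#include <linux/types.h>

/* LINEAR16: value = mantissa * 2^exp, with exp taken from VOUT_MODE
 * (now cached once per page). */
static long linear16_decode(u16 mantissa, s8 exponent)
{
	return exponent >= 0 ? (long)mantissa << exponent
			     : (long)mantissa >> -exponent;
}

/* LINEAR11: 5-bit signed exponent in the top bits, 11-bit signed
 * mantissa in the low bits of one register word. */
static long linear11_decode(u16 reg)
{
	s16 exponent = (s16)reg >> 11;
	s16 mantissa = ((s16)((reg & 0x7ff) << 5)) >> 5;

	return exponent >= 0 ? (long)mantissa << exponent
			     : (long)mantissa >> -exponent;
}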

View file

@ -21,6 +21,7 @@ obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o
obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o
obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o
obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o
obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o

View file

@ -381,7 +381,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS) ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
& PCI_MSI_DOORBELL_MASK; & PCI_MSI_DOORBELL_MASK;
writel(~PCI_MSI_DOORBELL_MASK, per_cpu_int_base + writel(~msimask, per_cpu_int_base +
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
for (msinr = PCI_MSI_DOORBELL_START; for (msinr = PCI_MSI_DOORBELL_START;
@ -407,7 +407,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS) ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
& IPI_DOORBELL_MASK; & IPI_DOORBELL_MASK;
writel(~IPI_DOORBELL_MASK, per_cpu_int_base + writel(~ipimask, per_cpu_int_base +
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
/* Handle all pending doorbells */ /* Handle all pending doorbells */

drivers/irqchip/irq-zevio.c (new file, 127 lines)
View file

@ -0,0 +1,127 @@
/*
* linux/drivers/irqchip/irq-zevio.c
*
* Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2, as
* published by the Free Software Foundation.
*
*/
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <asm/mach/irq.h>
#include <asm/exception.h>
#include "irqchip.h"
#define IO_STATUS 0x000
#define IO_RAW_STATUS 0x004
#define IO_ENABLE 0x008
#define IO_DISABLE 0x00C
#define IO_CURRENT 0x020
#define IO_RESET 0x028
#define IO_MAX_PRIORITY 0x02C
#define IO_IRQ_BASE 0x000
#define IO_FIQ_BASE 0x100
#define IO_INVERT_SEL 0x200
#define IO_STICKY_SEL 0x204
#define IO_PRIORITY_SEL 0x300
#define MAX_INTRS 32
#define FIQ_START MAX_INTRS
static struct irq_domain *zevio_irq_domain;
static void __iomem *zevio_irq_io;
static void zevio_irq_ack(struct irq_data *irqd)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(irqd);
struct irq_chip_regs *regs =
&container_of(irqd->chip, struct irq_chip_type, chip)->regs;
readl(gc->reg_base + regs->ack);
}
static asmlinkage void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
{
int irqnr;
while (readl(zevio_irq_io + IO_STATUS)) {
irqnr = readl(zevio_irq_io + IO_CURRENT);
irqnr = irq_find_mapping(zevio_irq_domain, irqnr);
handle_IRQ(irqnr, regs);
}
}
static void __init zevio_init_irq_base(void __iomem *base)
{
/* Disable all interrupts */
writel(~0, base + IO_DISABLE);
/* Accept interrupts of all priorities */
writel(0xF, base + IO_MAX_PRIORITY);
/* Reset existing interrupts */
readl(base + IO_RESET);
}
static int __init zevio_of_init(struct device_node *node,
struct device_node *parent)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct irq_chip_generic *gc;
int ret;
if (WARN_ON(zevio_irq_io || zevio_irq_domain))
return -EBUSY;
zevio_irq_io = of_iomap(node, 0);
BUG_ON(!zevio_irq_io);
/* Do not invert interrupt status bits */
writel(~0, zevio_irq_io + IO_INVERT_SEL);
/* Disable sticky interrupts */
writel(0, zevio_irq_io + IO_STICKY_SEL);
/* We don't use IRQ priorities. Set each IRQ to highest priority. */
memset_io(zevio_irq_io + IO_PRIORITY_SEL, 0, MAX_INTRS * sizeof(u32));
/* Init IRQ and FIQ */
zevio_init_irq_base(zevio_irq_io + IO_IRQ_BASE);
zevio_init_irq_base(zevio_irq_io + IO_FIQ_BASE);
zevio_irq_domain = irq_domain_add_linear(node, MAX_INTRS,
&irq_generic_chip_ops, NULL);
BUG_ON(!zevio_irq_domain);
ret = irq_alloc_domain_generic_chips(zevio_irq_domain, MAX_INTRS, 1,
"zevio_intc", handle_level_irq,
clr, 0, IRQ_GC_INIT_MASK_CACHE);
BUG_ON(ret);
gc = irq_get_domain_generic_chip(zevio_irq_domain, 0);
gc->reg_base = zevio_irq_io;
gc->chip_types[0].chip.irq_ack = zevio_irq_ack;
gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
gc->chip_types[0].regs.mask = IO_IRQ_BASE + IO_ENABLE;
gc->chip_types[0].regs.enable = IO_IRQ_BASE + IO_ENABLE;
gc->chip_types[0].regs.disable = IO_IRQ_BASE + IO_DISABLE;
gc->chip_types[0].regs.ack = IO_IRQ_BASE + IO_RESET;
set_handle_irq(zevio_handle_irq);
pr_info("TI-NSPIRE classic IRQ controller\n");
return 0;
}
IRQCHIP_DECLARE(zevio_irq, "lsi,zevio-intc", zevio_of_init);

View file

@ -1176,7 +1176,7 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
switch (demod) { switch (demod) {
case 0: case 0:
dev_err(&state->priv->i2c->dev, dev_err(&i2c->dev,
"%s: Error attaching frontend %d\n", "%s: Error attaching frontend %d\n",
KBUILD_MODNAME, demod); KBUILD_MODNAME, demod);
goto error1; goto error1;
@ -1200,12 +1200,6 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
state->demod = demod - 1; state->demod = demod - 1;
state->priv = priv; state->priv = priv;
/* test i2c bus for ack */
if (demod == 0) {
if (cx24117_readreg(state, 0x00) < 0)
goto error3;
}
dev_info(&state->priv->i2c->dev, dev_info(&state->priv->i2c->dev,
"%s: Attaching frontend %d\n", "%s: Attaching frontend %d\n",
KBUILD_MODNAME, state->demod); KBUILD_MODNAME, state->demod);
@ -1216,8 +1210,6 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
state->frontend.demodulator_priv = state; state->frontend.demodulator_priv = state;
return &state->frontend; return &state->frontend;
error3:
kfree(state);
error2: error2:
cx24117_release_priv(priv); cx24117_release_priv(priv);
error1: error1:

View file

@ -2,7 +2,7 @@
* Support for NXT2002 and NXT2004 - VSB/QAM * Support for NXT2002 and NXT2004 - VSB/QAM
* *
* Copyright (C) 2005 Kirk Lapray <kirk.lapray@gmail.com> * Copyright (C) 2005 Kirk Lapray <kirk.lapray@gmail.com>
* Copyright (C) 2006 Michael Krufky <mkrufky@m1k.net> * Copyright (C) 2006-2014 Michael Krufky <mkrufky@linuxtv.org>
* based on nxt2002 by Taylor Jacob <rtjacob@earthlink.net> * based on nxt2002 by Taylor Jacob <rtjacob@earthlink.net>
* and nxt2004 by Jean-Francois Thibert <jeanfrancois@sagetv.com> * and nxt2004 by Jean-Francois Thibert <jeanfrancois@sagetv.com>
* *

View file

@ -2554,7 +2554,7 @@ static int adv7842_core_init(struct v4l2_subdev *sd)
sdp_write_and_or(sd, 0xdd, 0xf0, pdata->sdp_free_run_force | sdp_write_and_or(sd, 0xdd, 0xf0, pdata->sdp_free_run_force |
(pdata->sdp_free_run_cbar_en << 1) | (pdata->sdp_free_run_cbar_en << 1) |
(pdata->sdp_free_run_man_col_en << 2) | (pdata->sdp_free_run_man_col_en << 2) |
(pdata->sdp_free_run_force << 3)); (pdata->sdp_free_run_auto << 3));
/* TODO from platform data */ /* TODO from platform data */
cp_write(sd, 0x69, 0x14); /* Enable CP CSC */ cp_write(sd, 0x69, 0x14); /* Enable CP CSC */

View file

@ -478,25 +478,33 @@ static void s5k5baf_write_arr_seq(struct s5k5baf *state, u16 addr,
u16 count, const u16 *seq) u16 count, const u16 *seq)
{ {
struct i2c_client *c = v4l2_get_subdevdata(&state->sd); struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
__be16 buf[count + 1]; __be16 buf[65];
int ret, n;
s5k5baf_i2c_write(state, REG_CMDWR_ADDR, addr); s5k5baf_i2c_write(state, REG_CMDWR_ADDR, addr);
if (state->error) if (state->error)
return; return;
buf[0] = __constant_cpu_to_be16(REG_CMD_BUF);
for (n = 1; n <= count; ++n)
buf[n] = cpu_to_be16(*seq++);
n *= 2;
ret = i2c_master_send(c, (char *)buf, n);
v4l2_dbg(3, debug, c, "i2c_write_seq(count=%d): %*ph\n", count, v4l2_dbg(3, debug, c, "i2c_write_seq(count=%d): %*ph\n", count,
min(2 * count, 64), seq - count); min(2 * count, 64), seq);
if (ret != n) { buf[0] = __constant_cpu_to_be16(REG_CMD_BUF);
v4l2_err(c, "i2c_write_seq: error during transfer (%d)\n", ret);
state->error = ret; while (count > 0) {
int n = min_t(int, count, ARRAY_SIZE(buf) - 1);
int ret, i;
for (i = 1; i <= n; ++i)
buf[i] = cpu_to_be16(*seq++);
i *= 2;
ret = i2c_master_send(c, (char *)buf, i);
if (ret != i) {
v4l2_err(c, "i2c_write_seq: error during transfer (%d)\n", ret);
state->error = ret;
break;
}
count -= n;
} }
} }

View file

@ -2426,7 +2426,7 @@ struct tvcard bttv_tvcards[] = {
}, },
/* ---- card 0x87---------------------------------- */ /* ---- card 0x87---------------------------------- */
[BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE] = { [BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE] = {
/* Michael Krufky <mkrufky@m1k.net> */ /* Michael Krufky <mkrufky@linuxtv.org> */
.name = "DViCO FusionHDTV 5 Lite", .name = "DViCO FusionHDTV 5 Lite",
.tuner_type = TUNER_LG_TDVS_H06XF, /* TDVS-H064F */ .tuner_type = TUNER_LG_TDVS_H06XF, /* TDVS-H064F */
.tuner_addr = ADDR_UNSET, .tuner_addr = ADDR_UNSET,

View file

@ -98,7 +98,7 @@ int bttv_sub_add_device(struct bttv_core *core, char *name)
err = device_register(&sub->dev); err = device_register(&sub->dev);
if (0 != err) { if (0 != err) {
kfree(sub); put_device(&sub->dev);
return err; return err;
} }
pr_info("%d: add subdevice \"%s\"\n", core->nr, dev_name(&sub->dev)); pr_info("%d: add subdevice \"%s\"\n", core->nr, dev_name(&sub->dev));

View file

@ -2590,7 +2590,7 @@ struct saa7134_board saa7134_boards[] = {
}}, }},
}, },
[SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180] = { [SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180] = {
/* Michael Krufky <mkrufky@m1k.net> /* Michael Krufky <mkrufky@linuxtv.org>
* Uses Alps Electric TDHU2, containing NXT2004 ATSC Decoder * Uses Alps Electric TDHU2, containing NXT2004 ATSC Decoder
* AFAIK, there is no analog demod, thus, * AFAIK, there is no analog demod, thus,
* no support for analog television. * no support for analog television.

View file

@ -1027,7 +1027,8 @@ static int fimc_probe(struct platform_device *pdev)
return 0; return 0;
err_gclk: err_gclk:
clk_disable(fimc->clock[CLK_GATE]); if (!pm_runtime_enabled(dev))
clk_disable(fimc->clock[CLK_GATE]);
err_sd: err_sd:
fimc_unregister_capture_subdev(fimc); fimc_unregister_capture_subdev(fimc);
err_sclk: err_sclk:
@ -1036,6 +1037,7 @@ err_sclk:
return ret; return ret;
} }
#ifdef CONFIG_PM_RUNTIME
static int fimc_runtime_resume(struct device *dev) static int fimc_runtime_resume(struct device *dev)
{ {
struct fimc_dev *fimc = dev_get_drvdata(dev); struct fimc_dev *fimc = dev_get_drvdata(dev);
@ -1068,6 +1070,7 @@ static int fimc_runtime_suspend(struct device *dev)
dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state); dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
return ret; return ret;
} }
#endif
#ifdef CONFIG_PM_SLEEP #ifdef CONFIG_PM_SLEEP
static int fimc_resume(struct device *dev) static int fimc_resume(struct device *dev)

View file

@ -1563,7 +1563,7 @@ static int fimc_lite_probe(struct platform_device *pdev)
if (!pm_runtime_enabled(dev)) { if (!pm_runtime_enabled(dev)) {
ret = clk_enable(fimc->clock); ret = clk_enable(fimc->clock);
if (ret < 0) if (ret < 0)
goto err_clk_put; goto err_sd;
} }
fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev); fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev);
@ -1579,7 +1579,8 @@ static int fimc_lite_probe(struct platform_device *pdev)
return 0; return 0;
err_clk_dis: err_clk_dis:
clk_disable(fimc->clock); if (!pm_runtime_enabled(dev))
clk_disable(fimc->clock);
err_sd: err_sd:
fimc_lite_unregister_capture_subdev(fimc); fimc_lite_unregister_capture_subdev(fimc);
err_clk_put: err_clk_put:
@ -1587,6 +1588,7 @@ err_clk_put:
return ret; return ret;
} }
#ifdef CONFIG_PM_RUNTIME
static int fimc_lite_runtime_resume(struct device *dev) static int fimc_lite_runtime_resume(struct device *dev)
{ {
struct fimc_lite *fimc = dev_get_drvdata(dev); struct fimc_lite *fimc = dev_get_drvdata(dev);
@ -1602,6 +1604,7 @@ static int fimc_lite_runtime_suspend(struct device *dev)
clk_disable(fimc->clock); clk_disable(fimc->clock);
return 0; return 0;
} }
#endif
#ifdef CONFIG_PM_SLEEP #ifdef CONFIG_PM_SLEEP
static int fimc_lite_resume(struct device *dev) static int fimc_lite_resume(struct device *dev)

View file

@ -175,7 +175,7 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
{ {
.name = "YUV 4:2:0 planar, Y/CbCr", .name = "YUV 4:2:0 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12, .fourcc = V4L2_PIX_FMT_NV12,
.depth = 16, .depth = 12,
.colplanes = 2, .colplanes = 2,
.h_align = 1, .h_align = 1,
.v_align = 1, .v_align = 1,
@ -188,10 +188,10 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
{ {
.name = "YUV 4:2:0 planar, Y/CbCr", .name = "YUV 4:2:0 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12, .fourcc = V4L2_PIX_FMT_NV12,
.depth = 16, .depth = 12,
.colplanes = 4, .colplanes = 2,
.h_align = 4, .h_align = 4,
.v_align = 1, .v_align = 4,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT | .flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE | SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_S5P | SJPEG_FMT_FLAG_S5P |

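The NV12 depth fix above follows from the format's layout; a one-line arithmetic sketch:

/* NV12: full-resolution Y plane (8 bits/pixel) plus one 2x2-subsampled
 * interleaved CbCr plane (16 bits per 4 pixels = 4 bits/pixel). */
#define NV12_BITS_PER_PIXEL (8 + (8 + 8) / 4)	/* = 12 */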
Some files were not shown because too many files have changed in this diff.