[XTENSA] Add support for configurable registers and coprocessors
The Xtensa architecture allows custom instructions and registers to be defined. Registers that are bound to a coprocessor are only accessible when the corresponding enable bit is set, which allows a 'lazy' context-switch mechanism to be implemented. Other registers need to be saved and restored at context-switch time or during interrupt handling. This patch adds support for these additional states:

- save and restore registers that are used by the compiler upon interrupt entry and exit
- context-switch additional registers that are not bound to any coprocessor
- 'lazy' context switch of registers bound to a coprocessor
- ptrace interface to provide access to the additional registers
- update configuration files in include/asm-xtensa/variant-fsf

Signed-off-by: Chris Zankel <chris@zankel.net>
parent 71d28e6c28
commit c658eac628
17 changed files with 1076 additions and 885 deletions
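The 'lazy' switch described above is easiest to see in miniature. Below is a small user-space model of the bookkeeping only; the names (lazy_switch, cp_owner, cp_live, saved_cp) are illustrative rather than the kernel's API, and the real work is done by the fast_coprocessor exception handler and the coprocessor.S routines in this patch.

#include <stdio.h>

#define NR_CP 8                               /* XCHAL_CP_MAX in a real configuration */

struct thread { int id; unsigned long saved_cp[NR_CP]; };

static struct thread *cp_owner[NR_CP];        /* whose state is live in coprocessor n */
static unsigned long cp_live[NR_CP];          /* stand-in for the coprocessor registers */

/* Models what happens when a thread uses coprocessor n while its CPENABLE
 * bit is clear: the old owner's registers are spilled, the new owner's are
 * reloaded, and ownership changes hands. Nothing is done at switch time. */
static void lazy_switch(struct thread *next, int n)
{
	struct thread *prev = cp_owner[n];

	if (prev == next)
		return;                       /* state is already live, nothing to do */
	if (prev)
		prev->saved_cp[n] = cp_live[n];
	cp_live[n] = next->saved_cp[n];
	cp_owner[n] = next;
}

int main(void)
{
	struct thread a = { .id = 1 }, b = { .id = 2 };

	lazy_switch(&a, 0);                   /* A faults on CP0 and becomes owner */
	cp_live[0] = 42;                      /* A computes something in CP0 */
	lazy_switch(&b, 0);                   /* B faults: A's state is spilled first */
	printf("A's saved CP0 state: %lu\n", a.saved_cp[0]);   /* prints 42 */
	return 0;
}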
@ -63,6 +63,8 @@ int main(void)
	DEFINE(PT_SIZE, sizeof(struct pt_regs));
	DEFINE(PT_AREG_END, offsetof (struct pt_regs, areg[XCHAL_NUM_AREGS]));
	DEFINE(PT_USER_SIZE, offsetof(struct pt_regs, areg[XCHAL_NUM_AREGS]));
	DEFINE(PT_XTREGS_OPT, offsetof(struct pt_regs, xtregs_opt));
	DEFINE(XTREGS_OPT_SIZE, sizeof(xtregs_opt_t));

	/* struct task_struct */
	DEFINE(TASK_PTRACE, offsetof (struct task_struct, ptrace));

@ -76,7 +78,19 @@ int main(void)
	/* struct thread_info (offset from start_struct) */
	DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
	DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
	DEFINE(THREAD_CP_SAVE, offsetof (struct task_struct, thread.cp_save));
	DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
#if XTENSA_HAVE_COPROCESSORS
	DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp));
	DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp));
	DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp));
	DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp));
	DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp));
	DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp));
	DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp));
	DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp));
#endif
	DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user));
	DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t));
	DEFINE(THREAD_CURRENT_DS, offsetof (struct task_struct, thread.current_ds));

	/* struct mm_struct */
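For readers unfamiliar with asm-offsets.c: each DEFINE() above turns a C-level offsetof/sizeof into a named constant that the assembly in entry.S and coprocessor.S can use (THREAD_CPENABLE, THREAD_XTREGS_CP0, ...). The stand-alone sketch below assumes the usual Kbuild trick: the file is compiled with -S only and a build script harvests the values from the generated assembly, so it is not meant to be assembled or run.

#include <stddef.h>

struct thread_info_sketch {                  /* illustrative, not the real layout */
	unsigned long flags;
	unsigned long cpenable;
};

/* Emits "->THREAD_CPENABLE <value>" into the compiler's assembly output;
 * a script turns such lines into "#define THREAD_CPENABLE <value>". */
#define DEFINE(sym, val) \
	__asm__ __volatile__("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	DEFINE(THREAD_CPENABLE, offsetof(struct thread_info_sketch, cpenable));
	return 0;
}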
@ -8,193 +8,328 @@
|
|||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (C) 2003 - 2005 Tensilica Inc.
|
||||
*
|
||||
* Marc Gauthier <marc@tensilica.com> <marc@alumni.uwaterloo.ca>
|
||||
* Copyright (C) 2003 - 2007 Tensilica Inc.
|
||||
*/
|
||||
|
||||
/*
|
||||
* This module contains a table that describes the layout of the various
|
||||
* custom registers and states associated with each coprocessor, as well
|
||||
* as those not associated with any coprocessor ("extra state").
|
||||
* This table is included with core dumps and is available via the ptrace
|
||||
* interface, allowing the layout of such register/state information to
|
||||
* be modified in the kernel without affecting the debugger. Each
|
||||
* register or state is identified using a 32-bit "libdb target number"
|
||||
* assigned when the Xtensa processor is generated.
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/coprocessor.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/unistd.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/current.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/signal.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
#if XCHAL_HAVE_CP
|
||||
/*
|
||||
* Entry condition:
|
||||
*
|
||||
* a0: trashed, original value saved on stack (PT_AREG0)
|
||||
* a1: a1
|
||||
* a2: new stack pointer, original in DEPC
|
||||
* a3: dispatch table
|
||||
* depc: a2, original value saved on stack (PT_DEPC)
|
||||
* excsave_1: a3
|
||||
*
|
||||
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
|
||||
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
|
||||
*/
|
||||
|
||||
#define CP_LAST ((XCHAL_CP_MAX - 1) * COPROCESSOR_INFO_SIZE)
|
||||
/* IO protection is currently unsupported. */
|
||||
|
||||
ENTRY(release_coprocessors)
|
||||
ENTRY(fast_io_protect)
|
||||
wsr a0, EXCSAVE_1
|
||||
movi a0, unrecoverable_exception
|
||||
callx0 a0
|
||||
|
||||
entry a1, 16
|
||||
# a2: task
|
||||
movi a3, 1 << XCHAL_CP_MAX # a3: coprocessor-bit
|
||||
movi a4, coprocessor_info+CP_LAST # a4: owner-table
|
||||
# a5: tmp
|
||||
movi a6, 0 # a6: 0
|
||||
rsil a7, LOCKLEVEL # a7: PS
|
||||
#if XTENSA_HAVE_COPROCESSORS
|
||||
|
||||
1: /* Check if task is coprocessor owner of coprocessor[i]. */
|
||||
/*
|
||||
* Macros for lazy context switch.
|
||||
*/
|
||||
|
||||
l32i a5, a4, COPROCESSOR_INFO_OWNER
|
||||
srli a3, a3, 1
|
||||
#define SAVE_CP_REGS(x) \
|
||||
.align 4; \
|
||||
.Lsave_cp_regs_cp##x: \
|
||||
.if XTENSA_HAVE_COPROCESSOR(x); \
|
||||
xchal_cp##x##_store a2 a4 a5 a6 a7; \
|
||||
.endif; \
|
||||
jx a0
|
||||
|
||||
#define SAVE_CP_REGS_TAB(x) \
|
||||
.if XTENSA_HAVE_COPROCESSOR(x); \
|
||||
.long .Lsave_cp_regs_cp##x - .Lsave_cp_regs_jump_table; \
|
||||
.else; \
|
||||
.long 0; \
|
||||
.endif; \
|
||||
.long THREAD_XTREGS_CP##x
|
||||
|
||||
|
||||
#define LOAD_CP_REGS(x) \
|
||||
.align 4; \
|
||||
.Lload_cp_regs_cp##x: \
|
||||
.if XTENSA_HAVE_COPROCESSOR(x); \
|
||||
xchal_cp##x##_load a2 a4 a5 a6 a7; \
|
||||
.endif; \
|
||||
jx a0
|
||||
|
||||
#define LOAD_CP_REGS_TAB(x) \
|
||||
.if XTENSA_HAVE_COPROCESSOR(x); \
|
||||
.long .Lload_cp_regs_cp##x - .Lload_cp_regs_jump_table; \
|
||||
.else; \
|
||||
.long 0; \
|
||||
.endif; \
|
||||
.long THREAD_XTREGS_CP##x
|
||||
|
||||
SAVE_CP_REGS(0)
|
||||
SAVE_CP_REGS(1)
|
||||
SAVE_CP_REGS(2)
|
||||
SAVE_CP_REGS(3)
|
||||
SAVE_CP_REGS(4)
|
||||
SAVE_CP_REGS(5)
|
||||
SAVE_CP_REGS(6)
|
||||
SAVE_CP_REGS(7)
|
||||
|
||||
LOAD_CP_REGS(0)
|
||||
LOAD_CP_REGS(1)
|
||||
LOAD_CP_REGS(2)
|
||||
LOAD_CP_REGS(3)
|
||||
LOAD_CP_REGS(4)
|
||||
LOAD_CP_REGS(5)
|
||||
LOAD_CP_REGS(6)
|
||||
LOAD_CP_REGS(7)
|
||||
|
||||
.align 4
|
||||
.Lsave_cp_regs_jump_table:
|
||||
SAVE_CP_REGS_TAB(0)
|
||||
SAVE_CP_REGS_TAB(1)
|
||||
SAVE_CP_REGS_TAB(2)
|
||||
SAVE_CP_REGS_TAB(3)
|
||||
SAVE_CP_REGS_TAB(4)
|
||||
SAVE_CP_REGS_TAB(5)
|
||||
SAVE_CP_REGS_TAB(6)
|
||||
SAVE_CP_REGS_TAB(7)
|
||||
|
||||
.Lload_cp_regs_jump_table:
|
||||
LOAD_CP_REGS_TAB(0)
|
||||
LOAD_CP_REGS_TAB(1)
|
||||
LOAD_CP_REGS_TAB(2)
|
||||
LOAD_CP_REGS_TAB(3)
|
||||
LOAD_CP_REGS_TAB(4)
|
||||
LOAD_CP_REGS_TAB(5)
|
||||
LOAD_CP_REGS_TAB(6)
|
||||
LOAD_CP_REGS_TAB(7)
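Each *_CP_REGS_TAB(x) entry above is an 8-byte pair: the offset of the per-coprocessor save/load stub relative to the jump table (0 if that coprocessor is not configured) and THREAD_XTREGS_CPx, the offset of its save area inside thread_info. The C model below shows how coprocessor_flush() and the fast_coprocessor handler further down consume such an entry; the struct and function names are illustrative only.

struct cp_tab_entry {
	long stub_off;      /* .Lsave_cp_regs_cpX - .Lsave_cp_regs_jump_table, or 0 */
	long xtregs_off;    /* THREAD_XTREGS_CPx: save area offset inside thread_info */
};

typedef void cp_stub_fn(void *save_area);

/* Roughly what "addx8; l32i +4; l32i +0; add; beqz; callx0" does below;
 * the data-to-function-pointer cast mirrors the asm's computed jump. */
static void flush_one_cp(const struct cp_tab_entry *tab, char *tab_base,
			 void *thread_info, int cp)
{
	const struct cp_tab_entry *e = &tab[cp];              /* addx8 a3, a3, a0 */
	void *area = (char *)thread_info + e->xtregs_off;     /* a2 += xtregs offset */

	if (e->stub_off)                                      /* CP not configured -> skip */
		((cp_stub_fn *)(tab_base + e->stub_off))(area);
}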
|
||||
|
||||
/*
|
||||
* coprocessor_save(buffer, index)
|
||||
* a2 a3
|
||||
* coprocessor_load(buffer, index)
|
||||
* a2 a3
|
||||
*
|
||||
* Save or load coprocessor registers for coprocessor 'index'.
|
||||
* The register values are saved to or loaded from them 'buffer' address.
|
||||
*
|
||||
* Note that these functions don't update the coprocessor_owner information!
|
||||
*
|
||||
*/
|
||||
|
||||
ENTRY(coprocessor_save)
|
||||
entry a1, 32
|
||||
s32i a0, a1, 0
|
||||
movi a0, .Lsave_cp_regs_jump_table
|
||||
addx8 a3, a3, a0
|
||||
l32i a3, a3, 0
|
||||
beqz a3, 1f
|
||||
addi a4, a4, -8
|
||||
beq a2, a5, 1b
|
||||
|
||||
/* Found an entry: Clear entry CPENABLE bit to disable CP. */
|
||||
|
||||
rsr a5, CPENABLE
|
||||
s32i a6, a4, COPROCESSOR_INFO_OWNER
|
||||
xor a5, a3, a5
|
||||
wsr a5, CPENABLE
|
||||
|
||||
bnez a3, 1b
|
||||
|
||||
1: wsr a7, PS
|
||||
rsync
|
||||
add a0, a0, a3
|
||||
callx0 a0
|
||||
1: l32i a0, a1, 0
|
||||
retw
|
||||
|
||||
|
||||
ENTRY(disable_coprocessor)
|
||||
entry sp, 16
|
||||
rsil a7, LOCKLEVEL
|
||||
rsr a3, CPENABLE
|
||||
movi a4, 1
|
||||
ssl a2
|
||||
sll a4, a4
|
||||
and a4, a3, a4
|
||||
xor a3, a3, a4
|
||||
wsr a3, CPENABLE
|
||||
wsr a7, PS
|
||||
rsync
|
||||
ENTRY(coprocessor_load)
|
||||
entry a1, 32
|
||||
s32i a0, a1, 0
|
||||
movi a0, .Lload_cp_regs_jump_table
|
||||
addx4 a3, a3, a0
|
||||
l32i a3, a3, 0
|
||||
beqz a3, 1f
|
||||
add a0, a0, a3
|
||||
callx0 a0
|
||||
1: l32i a0, a1, 0
|
||||
retw
|
||||
|
||||
ENTRY(enable_coprocessor)
|
||||
entry sp, 16
|
||||
rsil a7, LOCKLEVEL
|
||||
rsr a3, CPENABLE
|
||||
movi a4, 1
|
||||
ssl a2
|
||||
sll a4, a4
|
||||
or a3, a3, a4
|
||||
wsr a3, CPENABLE
|
||||
wsr a7, PS
|
||||
rsync
|
||||
retw
|
||||
|
||||
|
||||
ENTRY(save_coprocessor_extra)
|
||||
entry sp, 16
|
||||
xchal_extra_store_funcbody
|
||||
retw
|
||||
|
||||
ENTRY(restore_coprocessor_extra)
|
||||
entry sp, 16
|
||||
xchal_extra_load_funcbody
|
||||
retw
|
||||
|
||||
ENTRY(save_coprocessor_registers)
|
||||
entry sp, 16
|
||||
xchal_cpi_store_funcbody
|
||||
retw
|
||||
|
||||
ENTRY(restore_coprocessor_registers)
|
||||
entry sp, 16
|
||||
xchal_cpi_load_funcbody
|
||||
retw
|
||||
|
||||
|
||||
/*
|
||||
* The Xtensa compile-time HAL (core.h) XCHAL_*_SA_CONTENTS_LIBDB macros
|
||||
* describe the contents of coprocessor & extra save areas in terms of
|
||||
* undefined CONTENTS_LIBDB_{SREG,UREG,REGF} macros. We define these
|
||||
* latter macros here; they expand into a table of the format we want.
|
||||
* The general format is:
|
||||
* coprocessor_flush(struct task_info*, index)
|
||||
* a2 a3
|
||||
* coprocessor_restore(struct task_info*, index)
|
||||
* a2 a3
|
||||
*
|
||||
* CONTENTS_LIBDB_SREG(libdbnum, offset, size, align, rsv1, name, sregnum,
|
||||
* bitmask, rsv2, rsv3)
|
||||
* CONTENTS_LIBDB_UREG(libdbnum, offset, size, align, rsv1, name, uregnum,
|
||||
* bitmask, rsv2, rsv3)
|
||||
* CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index,
|
||||
* numentries, contentsize, regname_base,
|
||||
* regfile_name, rsv2, rsv3)
|
||||
* Save or load coprocessor registers for coprocessor 'index'.
|
||||
* The register values are saved to or loaded from the coprocessor area
|
||||
* inside the task_info structure.
|
||||
*
|
||||
* Note that these functions don't update the coprocessor_owner information!
|
||||
*
|
||||
* For this table, we only care about the <libdbnum>, <offset> and <size>
|
||||
* fields.
|
||||
*/
|
||||
|
||||
/* Map all XCHAL CONTENTS macros to the reg_entry asm macro defined below: */
|
||||
|
||||
#define CONTENTS_LIBDB_SREG(libdbnum,offset,size,align,rsv1,name,sregnum, \
|
||||
bitmask, rsv2, rsv3) \
|
||||
reg_entry libdbnum, offset, size ;
|
||||
#define CONTENTS_LIBDB_UREG(libdbnum,offset,size,align,rsv1,name,uregnum, \
|
||||
bitmask, rsv2, rsv3) \
|
||||
reg_entry libdbnum, offset, size ;
|
||||
#define CONTENTS_LIBDB_REGF(libdbnum, offset, size, align, rsv1, name, index, \
|
||||
numentries, contentsize, regname_base, \
|
||||
regfile_name, rsv2, rsv3) \
|
||||
reg_entry libdbnum, offset, size ;
|
||||
ENTRY(coprocessor_flush)
|
||||
entry a1, 32
|
||||
s32i a0, a1, 0
|
||||
movi a0, .Lsave_cp_regs_jump_table
|
||||
addx8 a3, a3, a0
|
||||
l32i a4, a3, 4
|
||||
l32i a3, a3, 0
|
||||
add a2, a2, a4
|
||||
beqz a3, 1f
|
||||
add a0, a0, a3
|
||||
callx0 a0
|
||||
1: l32i a0, a1, 0
|
||||
retw
|
||||
|
||||
/* A single table entry: */
|
||||
.macro reg_entry libdbnum, offset, size
|
||||
.ifne (__last_offset-(__last_group_offset+\offset))
|
||||
/* padding entry */
|
||||
.word (0xFC000000+__last_offset-(__last_group_offset+\offset))
|
||||
.endif
|
||||
.word \libdbnum /* actual entry */
|
||||
.set __last_offset, __last_group_offset+\offset+\size
|
||||
.endm /* reg_entry */
|
||||
|
||||
|
||||
/* Table entry that marks the beginning of a group (coprocessor or "extra"): */
|
||||
.macro reg_group cpnum, num_entries, align
|
||||
.set __last_group_offset, (__last_offset + \align- 1) & -\align
|
||||
.ifne \num_entries
|
||||
.word 0xFD000000+(\cpnum<<16)+\num_entries
|
||||
.endif
|
||||
.endm /* reg_group */
|
||||
ENTRY(coprocessor_restore)
|
||||
entry a1, 32
|
||||
s32i a0, a1, 0
|
||||
movi a0, .Lload_cp_regs_jump_table
|
||||
addx4 a3, a3, a0
|
||||
l32i a4, a3, 4
|
||||
l32i a3, a3, 0
|
||||
add a2, a2, a4
|
||||
beqz a3, 1f
|
||||
add a0, a0, a3
|
||||
callx0 a0
|
||||
1: l32i a0, a1, 0
|
||||
retw
|
||||
|
||||
/*
|
||||
* Register info tables.
|
||||
* Entry condition:
|
||||
*
|
||||
* a0: trashed, original value saved on stack (PT_AREG0)
|
||||
* a1: a1
|
||||
* a2: new stack pointer, original in DEPC
|
||||
* a3: dispatch table
|
||||
* depc: a2, original value saved on stack (PT_DEPC)
|
||||
* excsave_1: a3
|
||||
*
|
||||
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
|
||||
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
|
||||
*/
|
||||
|
||||
.section .rodata, "a"
|
||||
.globl _xtensa_reginfo_tables
|
||||
.globl _xtensa_reginfo_table_size
|
||||
.align 4
|
||||
_xtensa_reginfo_table_size:
|
||||
.word _xtensa_reginfo_table_end - _xtensa_reginfo_tables
|
||||
ENTRY(fast_coprocessor_double)
|
||||
wsr a0, EXCSAVE_1
|
||||
movi a0, unrecoverable_exception
|
||||
callx0 a0
|
||||
|
||||
_xtensa_reginfo_tables:
|
||||
.set __last_offset, 0
|
||||
reg_group 0xFF, XCHAL_EXTRA_SA_CONTENTS_LIBDB_NUM, XCHAL_EXTRA_SA_ALIGN
|
||||
XCHAL_EXTRA_SA_CONTENTS_LIBDB
|
||||
reg_group 0, XCHAL_CP0_SA_CONTENTS_LIBDB_NUM, XCHAL_CP0_SA_ALIGN
|
||||
XCHAL_CP0_SA_CONTENTS_LIBDB
|
||||
reg_group 1, XCHAL_CP1_SA_CONTENTS_LIBDB_NUM, XCHAL_CP1_SA_ALIGN
|
||||
XCHAL_CP1_SA_CONTENTS_LIBDB
|
||||
reg_group 2, XCHAL_CP2_SA_CONTENTS_LIBDB_NUM, XCHAL_CP2_SA_ALIGN
|
||||
XCHAL_CP2_SA_CONTENTS_LIBDB
|
||||
reg_group 3, XCHAL_CP3_SA_CONTENTS_LIBDB_NUM, XCHAL_CP3_SA_ALIGN
|
||||
XCHAL_CP3_SA_CONTENTS_LIBDB
|
||||
reg_group 4, XCHAL_CP4_SA_CONTENTS_LIBDB_NUM, XCHAL_CP4_SA_ALIGN
|
||||
XCHAL_CP4_SA_CONTENTS_LIBDB
|
||||
reg_group 5, XCHAL_CP5_SA_CONTENTS_LIBDB_NUM, XCHAL_CP5_SA_ALIGN
|
||||
XCHAL_CP5_SA_CONTENTS_LIBDB
|
||||
reg_group 6, XCHAL_CP6_SA_CONTENTS_LIBDB_NUM, XCHAL_CP6_SA_ALIGN
|
||||
XCHAL_CP6_SA_CONTENTS_LIBDB
|
||||
reg_group 7, XCHAL_CP7_SA_CONTENTS_LIBDB_NUM, XCHAL_CP7_SA_ALIGN
|
||||
XCHAL_CP7_SA_CONTENTS_LIBDB
|
||||
.word 0xFC000000 /* invalid register number,marks end of table*/
|
||||
_xtensa_reginfo_table_end:
|
||||
#endif
|
||||
|
||||
ENTRY(fast_coprocessor)
|
||||
|
||||
/* Save remaining registers a1-a3 and SAR */
|
||||
|
||||
xsr a3, EXCSAVE_1
|
||||
s32i a3, a2, PT_AREG3
|
||||
rsr a3, SAR
|
||||
s32i a1, a2, PT_AREG1
|
||||
s32i a3, a2, PT_SAR
|
||||
mov a1, a2
|
||||
rsr a2, DEPC
|
||||
s32i a2, a1, PT_AREG2
|
||||
|
||||
/*
|
||||
* The hal macros require up to 4 temporary registers. We use a3..a6.
|
||||
*/
|
||||
|
||||
s32i a4, a1, PT_AREG4
|
||||
s32i a5, a1, PT_AREG5
|
||||
s32i a6, a1, PT_AREG6
|
||||
|
||||
/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
|
||||
|
||||
rsr a3, EXCCAUSE
|
||||
addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
|
||||
|
||||
/* Set corresponding CPENABLE bit -> (sar:cp-index, a3: 1<<cp-index)*/
|
||||
|
||||
ssl a3 # SAR: 32 - coprocessor_number
|
||||
movi a2, 1
|
||||
rsr a0, CPENABLE
|
||||
sll a2, a2
|
||||
or a0, a0, a2
|
||||
wsr a0, CPENABLE
|
||||
rsync
|
||||
|
||||
/* Retrieve previous owner. (a3 still holds CP number) */
|
||||
|
||||
movi a0, coprocessor_owner # list of owners
|
||||
addx4 a0, a3, a0 # entry for CP
|
||||
l32i a4, a0, 0
|
||||
|
||||
beqz a4, 1f # skip 'save' if no previous owner
|
||||
|
||||
/* Disable coprocessor for previous owner. (a2 = 1 << CP number) */
|
||||
|
||||
l32i a5, a4, THREAD_CPENABLE
|
||||
xor a5, a5, a2 # (1 << cp-id) still in a2
|
||||
s32i a5, a4, THREAD_CPENABLE
|
||||
|
||||
/*
|
||||
* Get context save area and 'call' save routine.
|
||||
* (a4 still holds previous owner (thread_info), a3 CP number)
|
||||
*/
|
||||
|
||||
movi a5, .Lsave_cp_regs_jump_table
|
||||
movi a0, 2f # a0: 'return' address
|
||||
addx8 a3, a3, a5 # a3: coprocessor number
|
||||
l32i a2, a3, 4 # a2: xtregs offset
|
||||
l32i a3, a3, 0 # a3: jump offset
|
||||
add a2, a2, a4
|
||||
add a4, a3, a5 # a4: address of save routine
|
||||
jx a4
|
||||
|
||||
/* Note that only a0 and a1 were preserved. */
|
||||
|
||||
2: rsr a3, EXCCAUSE
|
||||
addi a3, a3, -EXCCAUSE_COPROCESSOR0_DISABLED
|
||||
movi a0, coprocessor_owner
|
||||
addx4 a0, a3, a0
|
||||
|
||||
/* Set new 'owner' (a0 points to the CP owner, a3 contains the CP nr) */
|
||||
|
||||
1: GET_THREAD_INFO (a4, a1)
|
||||
s32i a4, a0, 0
|
||||
|
||||
/* Get context save area and 'call' load routine. */
|
||||
|
||||
movi a5, .Lload_cp_regs_jump_table
|
||||
movi a0, 1f
|
||||
addx8 a3, a3, a5
|
||||
l32i a2, a3, 4 # a2: xtregs offset
|
||||
l32i a3, a3, 0 # a3: jump offset
|
||||
add a2, a2, a4
|
||||
add a4, a3, a5
|
||||
jx a4
|
||||
|
||||
/* Restore all registers and return from exception handler. */
|
||||
|
||||
1: l32i a6, a1, PT_AREG6
|
||||
l32i a5, a1, PT_AREG5
|
||||
l32i a4, a1, PT_AREG4
|
||||
|
||||
l32i a0, a1, PT_SAR
|
||||
l32i a3, a1, PT_AREG3
|
||||
l32i a2, a1, PT_AREG2
|
||||
wsr a0, SAR
|
||||
l32i a0, a1, PT_AREG0
|
||||
l32i a1, a1, PT_AREG1
|
||||
|
||||
rfe
|
||||
|
||||
.data
|
||||
ENTRY(coprocessor_owner)
|
||||
.fill XCHAL_CP_MAX, 4, 0
|
||||
|
||||
#endif /* XTENSA_HAVE_COPROCESSORS */
|
||||
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
#include <asm/page.h>
|
||||
#include <asm/signal.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/variant/tie-asm.h>
|
||||
|
||||
/* Unimplemented features. */
|
||||
|
||||
|
@ -213,19 +214,7 @@ _user_exception:
|
|||
|
||||
/* We are back to the original stack pointer (a1) */
|
||||
|
||||
2:
|
||||
#if XCHAL_EXTRA_SA_SIZE
|
||||
|
||||
/* For user exceptions, save the extra state into the user's TCB.
|
||||
* Note: We must assume that xchal_extra_store_funcbody destroys a2..a15
|
||||
*/
|
||||
|
||||
GET_CURRENT(a2,a1)
|
||||
addi a2, a2, THREAD_CP_SAVE
|
||||
xchal_extra_store_funcbody
|
||||
#endif
|
||||
|
||||
/* Now, jump to the common exception handler. */
|
||||
2: /* Now, jump to the common exception handler. */
|
||||
|
||||
j common_exception
|
||||
|
||||
|
@ -381,6 +370,10 @@ common_exception:
|
|||
s32i a2, a1, PT_LBEG
|
||||
s32i a3, a1, PT_LEND
|
||||
|
||||
/* Save optional registers. */
|
||||
|
||||
save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
|
||||
|
||||
/* Go to second-level dispatcher. Set up parameters to pass to the
|
||||
* exception handler and call the exception handler.
|
||||
*/
|
||||
|
@ -452,22 +445,6 @@ common_exception_return:
|
|||
|
||||
4: /* a2 holds GET_CURRENT(a2,a1) */
|
||||
|
||||
#if XCHAL_EXTRA_SA_SIZE
|
||||
|
||||
/* For user exceptions, restore the extra state from the user's TCB. */
|
||||
|
||||
/* Note: a2 still contains GET_CURRENT(a2,a1) */
|
||||
addi a2, a2, THREAD_CP_SAVE
|
||||
xchal_extra_load_funcbody
|
||||
|
||||
/* We must assume that xchal_extra_store_funcbody destroys
|
||||
* registers a2..a15. FIXME, this list can eventually be
|
||||
* reduced once real register requirements of the macro are
|
||||
* finalized. */
|
||||
|
||||
#endif /* XCHAL_EXTRA_SA_SIZE */
|
||||
|
||||
|
||||
/* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */
|
||||
|
||||
l32i a2, a1, PT_WINDOWBASE
|
||||
|
@ -614,6 +591,12 @@ kernel_exception_exit:
|
|||
|
||||
common_exception_exit:
|
||||
|
||||
/* Restore optional registers. */
|
||||
|
||||
load_xtregs_opt a1 a3 a4 a5 a6 a7 PT_XTREGS_OPT
|
||||
|
||||
/* Restore address registers. */
|
||||
|
||||
_bbsi.l a2, 1, 1f
|
||||
l32i a4, a1, PT_AREG4
|
||||
l32i a5, a1, PT_AREG5
|
||||
|
@ -1146,7 +1129,6 @@ CATCH
|
|||
* excsave_1: a3
|
||||
*
|
||||
* Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
|
||||
* Note: We don't need to save a2 in depc (return value)
|
||||
*/
|
||||
|
||||
ENTRY(fast_syscall_spill_registers)
|
||||
|
@ -1162,29 +1144,31 @@ ENTRY(fast_syscall_spill_registers)
|
|||
|
||||
rsr a0, SAR
|
||||
xsr a3, EXCSAVE_1 # restore a3 and excsave_1
|
||||
s32i a0, a2, PT_AREG4 # store SAR to PT_AREG4
|
||||
s32i a3, a2, PT_AREG3
|
||||
s32i a4, a2, PT_AREG4
|
||||
s32i a0, a2, PT_AREG5 # store SAR to PT_AREG5
|
||||
|
||||
/* The spill routine might clobber a7, a11, and a15. */
|
||||
|
||||
s32i a7, a2, PT_AREG5
|
||||
s32i a11, a2, PT_AREG6
|
||||
s32i a15, a2, PT_AREG7
|
||||
s32i a7, a2, PT_AREG7
|
||||
s32i a11, a2, PT_AREG11
|
||||
s32i a15, a2, PT_AREG15
|
||||
|
||||
call0 _spill_registers # destroys a3, DEPC, and SAR
|
||||
call0 _spill_registers # destroys a3, a4, and SAR
|
||||
|
||||
/* Advance PC, restore registers and SAR, and return from exception. */
|
||||
|
||||
l32i a3, a2, PT_AREG4
|
||||
l32i a3, a2, PT_AREG5
|
||||
l32i a4, a2, PT_AREG4
|
||||
l32i a0, a2, PT_AREG0
|
||||
wsr a3, SAR
|
||||
l32i a3, a2, PT_AREG3
|
||||
|
||||
/* Restore clobbered registers. */
|
||||
|
||||
l32i a7, a2, PT_AREG5
|
||||
l32i a11, a2, PT_AREG6
|
||||
l32i a15, a2, PT_AREG7
|
||||
l32i a7, a2, PT_AREG7
|
||||
l32i a11, a2, PT_AREG11
|
||||
l32i a15, a2, PT_AREG15
|
||||
|
||||
movi a2, 0
|
||||
rfe
|
||||
|
@ -1257,9 +1241,9 @@ fast_syscall_spill_registers_fixup:
|
|||
|
||||
movi a3, exc_table
|
||||
rsr a0, EXCCAUSE
|
||||
addx4 a0, a0, a3 # find entry in table
|
||||
l32i a0, a0, EXC_TABLE_FAST_USER # load handler
|
||||
jx a0
|
||||
addx4 a0, a0, a3 # find entry in table
|
||||
l32i a0, a0, EXC_TABLE_FAST_USER # load handler
|
||||
jx a0
|
||||
|
||||
fast_syscall_spill_registers_fixup_return:
|
||||
|
||||
|
@ -1297,7 +1281,7 @@ fast_syscall_spill_registers_fixup_return:
|
|||
* This is not a real function. The following conditions must be met:
|
||||
*
|
||||
* - must be called with call0.
|
||||
* - uses DEPC, a3 and SAR.
|
||||
* - uses a3, a4 and SAR.
|
||||
* - the last 'valid' register of each frame are clobbered.
|
||||
* - the caller must have registered a fixup handler
|
||||
* (or be inside a critical section)
|
||||
|
@ -1309,41 +1293,39 @@ ENTRY(_spill_registers)
|
|||
/*
|
||||
* Rotate ws so that the current windowbase is at bit 0.
|
||||
* Assume ws = xxxwww1yy (www1 current window frame).
|
||||
* Rotate ws right so that a2 = yyxxxwww1.
|
||||
* Rotate ws right so that a4 = yyxxxwww1.
|
||||
*/
|
||||
|
||||
wsr a2, DEPC # preserve a2
|
||||
rsr a2, WINDOWBASE
|
||||
rsr a4, WINDOWBASE
|
||||
rsr a3, WINDOWSTART # a3 = xxxwww1yy
|
||||
ssr a2 # holds WB
|
||||
slli a2, a3, WSBITS
|
||||
or a3, a3, a2 # a3 = xxxwww1yyxxxwww1yy
|
||||
ssr a4 # holds WB
|
||||
slli a4, a3, WSBITS
|
||||
or a3, a3, a4 # a3 = xxxwww1yyxxxwww1yy
|
||||
srl a3, a3 # a3 = 00xxxwww1yyxxxwww1
|
||||
|
||||
/* We are done if there are no more than the current register frame. */
|
||||
|
||||
extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww
|
||||
movi a2, (1 << (WSBITS-1))
|
||||
movi a4, (1 << (WSBITS-1))
|
||||
_beqz a3, .Lnospill # only one active frame? jump
|
||||
|
||||
/* We want 1 at the top, so that we return to the current windowbase */
|
||||
|
||||
or a3, a3, a2 # 1yyxxxwww
|
||||
or a3, a3, a4 # 1yyxxxwww
|
||||
|
||||
/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
|
||||
|
||||
wsr a3, WINDOWSTART # save shifted windowstart
|
||||
neg a2, a3
|
||||
and a3, a2, a3 # first bit set from right: 000010000
|
||||
neg a4, a3
|
||||
and a3, a4, a3 # first bit set from right: 000010000
|
||||
|
||||
ffs_ws a2, a3 # a2: shifts to skip empty frames
|
||||
ffs_ws a4, a3 # a4: shifts to skip empty frames
|
||||
movi a3, WSBITS
|
||||
sub a2, a3, a2 # WSBITS-a2:number of 0-bits from right
|
||||
ssr a2 # save in SAR for later.
|
||||
sub a4, a3, a4 # WSBITS-a4:number of 0-bits from right
|
||||
ssr a4 # save in SAR for later.
|
||||
|
||||
rsr a3, WINDOWBASE
|
||||
add a3, a3, a2
|
||||
rsr a2, DEPC # restore a2
|
||||
add a3, a3, a4
|
||||
wsr a3, WINDOWBASE
|
||||
rsync
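The rotation commented above ("Assume ws = xxxwww1yy ...") is easier to follow with numbers. A small stand-alone calculation, assuming WSBITS = 16 (XCHAL_NUM_AREGS / 4 for a 64-entry register file); the sample WINDOWSTART/WINDOWBASE values are made up for illustration:

#include <stdio.h>

#define WSBITS 16    /* assumption: 64 address registers, 4 per window increment */

int main(void)
{
	unsigned int ws = 0x0034;    /* WINDOWSTART: frames start at bits 2, 4 and 5 */
	unsigned int wb = 5;         /* WINDOWBASE: the current frame is the one at bit 5 */

	/* Rotate right by wb so the current frame's '1' ends up at bit 0. */
	unsigned int rot = ((ws | (ws << WSBITS)) >> wb) & ((1u << WSBITS) - 1);

	/* Drop bit 0 (the current frame); what remains are the frames to spill. */
	unsigned int older = rot >> 1;

	printf("rotated = 0x%04x, older frames = 0x%04x\n", rot, older);
	return 0;    /* prints: rotated = 0xa001, older frames = 0x5000 */
}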
|
||||
|
||||
|
@ -1373,7 +1355,6 @@ ENTRY(_spill_registers)
|
|||
j .Lc12c
|
||||
|
||||
.Lnospill:
|
||||
rsr a2, DEPC
|
||||
ret
|
||||
|
||||
.Lloop: _bbsi.l a3, 1, .Lc4
|
||||
|
@ -1810,154 +1791,6 @@ ENTRY(fast_store_prohibited)
|
|||
1: j _user_exception
|
||||
|
||||
|
||||
#if XCHAL_EXTRA_SA_SIZE
|
||||
|
||||
#warning fast_coprocessor untested
|
||||
|
||||
/*
|
||||
* Entry condition:
|
||||
*
|
||||
* a0: trashed, original value saved on stack (PT_AREG0)
|
||||
* a1: a1
|
||||
* a2: new stack pointer, original in DEPC
|
||||
* a3: dispatch table
|
||||
* depc: a2, original value saved on stack (PT_DEPC)
|
||||
* excsave_1: a3
|
||||
*
|
||||
* PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
|
||||
* < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
|
||||
*/
|
||||
|
||||
ENTRY(fast_coprocessor_double)
|
||||
wsr a0, EXCSAVE_1
|
||||
movi a0, unrecoverable_exception
|
||||
callx0 a0
|
||||
|
||||
ENTRY(fast_coprocessor)
|
||||
|
||||
/* Fatal if we are in a double exception. */
|
||||
|
||||
l32i a0, a2, PT_DEPC
|
||||
_bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_coprocessor_double
|
||||
|
||||
/* Save some registers a1, a3, a4, SAR */
|
||||
|
||||
xsr a3, EXCSAVE_1
|
||||
s32i a3, a2, PT_AREG3
|
||||
rsr a3, SAR
|
||||
s32i a4, a2, PT_AREG4
|
||||
s32i a1, a2, PT_AREG1
|
||||
s32i a5, a1, PT_AREG5
|
||||
s32i a3, a2, PT_SAR
|
||||
mov a1, a2
|
||||
|
||||
/* Currently, the HAL macros only guarantee saving a0 and a1.
|
||||
* These can and will be refined in the future, but for now,
|
||||
* just save the remaining registers of a2...a15.
|
||||
*/
|
||||
s32i a6, a1, PT_AREG6
|
||||
s32i a7, a1, PT_AREG7
|
||||
s32i a8, a1, PT_AREG8
|
||||
s32i a9, a1, PT_AREG9
|
||||
s32i a10, a1, PT_AREG10
|
||||
s32i a11, a1, PT_AREG11
|
||||
s32i a12, a1, PT_AREG12
|
||||
s32i a13, a1, PT_AREG13
|
||||
s32i a14, a1, PT_AREG14
|
||||
s32i a15, a1, PT_AREG15
|
||||
|
||||
/* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */
|
||||
|
||||
rsr a0, EXCCAUSE
|
||||
addi a3, a0, -XCHAL_EXCCAUSE_COPROCESSOR0_DISABLED
|
||||
|
||||
/* Set corresponding CPENABLE bit */
|
||||
|
||||
movi a4, 1
|
||||
ssl a3 # SAR: 32 - coprocessor_number
|
||||
rsr a5, CPENABLE
|
||||
sll a4, a4
|
||||
or a4, a5, a4
|
||||
wsr a4, CPENABLE
|
||||
rsync
|
||||
movi a5, coprocessor_info # list of owner and offset into cp_save
|
||||
addx8 a0, a4, a5 # entry for CP
|
||||
|
||||
bne a4, a5, .Lload # bit wasn't set before, cp not in use
|
||||
|
||||
/* Now compare the current task with the owner of the coprocessor.
|
||||
* If they are the same, there is no reason to save or restore any
|
||||
* coprocessor state. Having already enabled the coprocessor,
|
||||
* branch ahead to return.
|
||||
*/
|
||||
GET_CURRENT(a5,a1)
|
||||
l32i a4, a0, COPROCESSOR_INFO_OWNER # a4: current owner for this CP
|
||||
beq a4, a5, .Ldone
|
||||
|
||||
/* Find location to dump current coprocessor state:
|
||||
* task_struct->task_cp_save_offset + coprocessor_offset[coprocessor]
|
||||
*
|
||||
* Note: a0 pointer to the entry in the coprocessor owner table,
|
||||
* a3 coprocessor number,
|
||||
* a4 current owner of coprocessor.
|
||||
*/
|
||||
l32i a5, a0, COPROCESSOR_INFO_OFFSET
|
||||
addi a2, a4, THREAD_CP_SAVE
|
||||
add a2, a2, a5
|
||||
|
||||
/* Store current coprocessor states. (a5 still has CP number) */
|
||||
|
||||
xchal_cpi_store_funcbody
|
||||
|
||||
/* The macro might have destroyed a3 (coprocessor number), but
|
||||
* SAR still has 32 - coprocessor_number!
|
||||
*/
|
||||
movi a3, 32
|
||||
rsr a4, SAR
|
||||
sub a3, a3, a4
|
||||
|
||||
.Lload: /* A new task now owns the corpocessors. Save its TCB pointer into
|
||||
* the coprocessor owner table.
|
||||
*
|
||||
* Note: a0 pointer to the entry in the coprocessor owner table,
|
||||
* a3 coprocessor number.
|
||||
*/
|
||||
GET_CURRENT(a4,a1)
|
||||
s32i a4, a0, 0
|
||||
|
||||
/* Find location from where to restore the current coprocessor state.*/
|
||||
|
||||
l32i a5, a0, COPROCESSOR_INFO_OFFSET
|
||||
addi a2, a4, THREAD_CP_SAVE
|
||||
add a2, a2, a4
|
||||
|
||||
xchal_cpi_load_funcbody
|
||||
|
||||
/* We must assume that the xchal_cpi_store_funcbody macro destroyed
|
||||
* registers a2..a15.
|
||||
*/
|
||||
|
||||
.Ldone: l32i a15, a1, PT_AREG15
|
||||
l32i a14, a1, PT_AREG14
|
||||
l32i a13, a1, PT_AREG13
|
||||
l32i a12, a1, PT_AREG12
|
||||
l32i a11, a1, PT_AREG11
|
||||
l32i a10, a1, PT_AREG10
|
||||
l32i a9, a1, PT_AREG9
|
||||
l32i a8, a1, PT_AREG8
|
||||
l32i a7, a1, PT_AREG7
|
||||
l32i a6, a1, PT_AREG6
|
||||
l32i a5, a1, PT_AREG5
|
||||
l32i a4, a1, PT_AREG4
|
||||
l32i a3, a1, PT_AREG3
|
||||
l32i a2, a1, PT_AREG2
|
||||
l32i a0, a1, PT_AREG0
|
||||
l32i a1, a1, PT_AREG1
|
||||
|
||||
rfe
|
||||
|
||||
#endif /* XCHAL_EXTRA_SA_SIZE */
|
||||
|
||||
/*
|
||||
* System Calls.
|
||||
*
|
||||
|
@ -2066,20 +1899,36 @@ ENTRY(_switch_to)
|
|||
|
||||
entry a1, 16
|
||||
|
||||
mov a4, a3 # preserve a3
|
||||
mov a12, a2 # preserve 'prev' (a2)
|
||||
mov a13, a3 # and 'next' (a3)
|
||||
|
||||
s32i a0, a2, THREAD_RA # save return address
|
||||
s32i a1, a2, THREAD_SP # save stack pointer
|
||||
l32i a4, a2, TASK_THREAD_INFO
|
||||
l32i a5, a3, TASK_THREAD_INFO
|
||||
|
||||
/* Disable ints while we manipulate the stack pointer; spill regs. */
|
||||
save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
|
||||
|
||||
movi a5, (1 << PS_EXCM_BIT) | LOCKLEVEL
|
||||
xsr a5, PS
|
||||
s32i a0, a12, THREAD_RA # save return address
|
||||
s32i a1, a12, THREAD_SP # save stack pointer
|
||||
|
||||
/* Disable ints while we manipulate the stack pointer. */
|
||||
|
||||
movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
|
||||
xsr a14, PS
|
||||
rsr a3, EXCSAVE_1
|
||||
rsync
|
||||
s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */
|
||||
|
||||
call0 _spill_registers
|
||||
/* Switch CPENABLE */
|
||||
|
||||
#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
|
||||
l32i a3, a5, THREAD_CPENABLE
|
||||
xsr a3, CPENABLE
|
||||
s32i a3, a4, THREAD_CPENABLE
|
||||
#endif
|
||||
|
||||
/* Flush register file. */
|
||||
|
||||
call0 _spill_registers # destroys a3, a4, and SAR
|
||||
|
||||
/* Set kernel stack (and leave critical section)
|
||||
* Note: It's save to set it here. The stack will not be overwritten
|
||||
|
@ -2087,19 +1936,21 @@ ENTRY(_switch_to)
|
|||
* we return from kernel space.
|
||||
*/
|
||||
|
||||
l32i a0, a4, TASK_THREAD_INFO
|
||||
rsr a3, EXCSAVE_1 # exc_table
|
||||
movi a1, 0
|
||||
addi a0, a0, PT_REGS_OFFSET
|
||||
s32i a1, a3, EXC_TABLE_FIXUP
|
||||
s32i a0, a3, EXC_TABLE_KSTK
|
||||
movi a6, 0
|
||||
addi a7, a5, PT_REGS_OFFSET
|
||||
s32i a6, a3, EXC_TABLE_FIXUP
|
||||
s32i a7, a3, EXC_TABLE_KSTK
|
||||
|
||||
/* restore context of the task that 'next' addresses */
|
||||
|
||||
l32i a0, a4, THREAD_RA /* restore return address */
|
||||
l32i a1, a4, THREAD_SP /* restore stack pointer */
|
||||
l32i a0, a13, THREAD_RA # restore return address
|
||||
l32i a1, a13, THREAD_SP # restore stack pointer
|
||||
|
||||
wsr a5, PS
|
||||
load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
|
||||
|
||||
wsr a14, PS
|
||||
mov a2, a12 # return 'prev'
|
||||
rsync
|
||||
|
||||
retw
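The three-instruction CPENABLE switch above (l32i / xsr / s32i) is the heart of the lazy scheme at context-switch time: the hardware register takes on next's saved enable mask, and whatever was live is written back as prev's mask. A C-level model, with a helper standing in for the special-register exchange:

struct thread_info_model { unsigned long cpenable; };    /* illustrative only */

static unsigned long hw_cpenable;            /* models the CPENABLE special register */

static unsigned long xsr_cpenable(unsigned long value)   /* models "xsr a3, CPENABLE" */
{
	unsigned long old = hw_cpenable;
	hw_cpenable = value;
	return old;
}

/* Equivalent of the #if (XTENSA_HAVE_COPROCESSORS || ...) block in _switch_to:
 * next's coprocessors become usable again without reloading anything, and
 * prev's owned coprocessors will fault and reload lazily when touched later. */
static void switch_cpenable(struct thread_info_model *prev,
			    struct thread_info_model *next)
{
	prev->cpenable = xsr_cpenable(next->cpenable);
}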
|
||||
|
|
|
@ -52,6 +52,55 @@ void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);


#if XTENSA_HAVE_COPROCESSORS

void coprocessor_release_all(struct thread_info *ti)
{
	unsigned long cpenable;
	int i;

	/* Make sure we don't switch tasks during this operation. */

	preempt_disable();

	/* Walk through all cp owners and release it for the requested one. */

	cpenable = ti->cpenable;

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if (coprocessor_owner[i] == ti) {
			coprocessor_owner[i] = 0;
			cpenable &= ~(1 << i);
		}
	}

	ti->cpenable = cpenable;
	coprocessor_clear_cpenable();

	preempt_enable();
}

void coprocessor_flush_all(struct thread_info *ti)
{
	unsigned long cpenable;
	int i;

	preempt_disable();

	cpenable = ti->cpenable;

	for (i = 0; i < XCHAL_CP_MAX; i++) {
		if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti)
			coprocessor_flush(ti, i);
		cpenable >>= 1;
	}

	preempt_enable();
}

#endif


/*
 * Powermanagement idle function, if any is provided by the platform.
 */
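A note on ordering when a thread's coprocessor state has to end up in memory (compare flush_thread(), ptrace_getxregs() and setup_sigcontext() elsewhere in this patch): flushing must happen before releasing, because coprocessor_release_all() clears the coprocessor_owner[] entries that coprocessor_flush_all() consults to find live state. A sketch only, assuming the kernel-internal declarations introduced by this patch; checkpoint_cp_state() itself is a hypothetical helper, not part of the patch.

#include <linux/sched.h>          /* struct thread_info (kernel build only) */
#include <asm/coprocessor.h>      /* coprocessor_flush_all(), coprocessor_release_all() */

static void checkpoint_cp_state(struct thread_info *ti)
{
	coprocessor_flush_all(ti);     /* spill live CP registers into ti->xtregs_cp */
	coprocessor_release_all(ti);   /* drop ownership so the next use reloads lazily */
}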
@ -71,15 +120,36 @@ void cpu_idle(void)
|
|||
}
|
||||
|
||||
/*
|
||||
* Free current thread data structures etc..
|
||||
* This is called when the thread calls exit().
|
||||
*/
|
||||
|
||||
void exit_thread(void)
|
||||
{
|
||||
#if XTENSA_HAVE_COPROCESSORS
|
||||
coprocessor_release_all(current_thread_info());
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* Flush thread state. This is called when a thread does an execve()
|
||||
* Note that we flush coprocessor registers for the case execve fails.
|
||||
*/
|
||||
void flush_thread(void)
|
||||
{
|
||||
#if XTENSA_HAVE_COPROCESSORS
|
||||
struct thread_info *ti = current_thread_info();
|
||||
coprocessor_flush_all(ti);
|
||||
coprocessor_release_all(ti);
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* This is called before the thread is copied.
|
||||
*/
|
||||
void prepare_to_copy(struct task_struct *tsk)
|
||||
{
|
||||
#if XTENSA_HAVE_COPROCESSORS
|
||||
coprocessor_flush_all(task_thread_info(tsk));
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -107,6 +177,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
|
|||
struct task_struct * p, struct pt_regs * regs)
|
||||
{
|
||||
struct pt_regs *childregs;
|
||||
struct thread_info *ti;
|
||||
unsigned long tos;
|
||||
int user_mode = user_mode(regs);
|
||||
|
||||
|
@ -128,13 +199,14 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
|
|||
p->set_child_tid = p->clear_child_tid = NULL;
|
||||
p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
|
||||
p->thread.sp = (unsigned long)childregs;
|
||||
|
||||
if (user_mode(regs)) {
|
||||
|
||||
int len = childregs->wmask & ~0xf;
|
||||
childregs->areg[1] = usp;
|
||||
memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
|
||||
®s->areg[XCHAL_NUM_AREGS - len/4], len);
|
||||
|
||||
// FIXME: we need to set THREADPTR in thread_info...
|
||||
if (clone_flags & CLONE_SETTLS)
|
||||
childregs->areg[2] = childregs->areg[6];
|
||||
|
||||
|
@ -142,6 +214,12 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
|
|||
/* In kernel space, we start a new thread with a new stack. */
|
||||
childregs->wmask = 1;
|
||||
}
|
||||
|
||||
#if (XTENSA_HAVE_COPROCESSORS || XTENSA_HAVE_IO_PORTS)
|
||||
ti = task_thread_info(p);
|
||||
ti->cpenable = 0;
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -179,10 +257,6 @@ unsigned long get_wchan(struct task_struct *p)
|
|||
}
|
||||
|
||||
/*
|
||||
* do_copy_regs() gathers information from 'struct pt_regs' and
|
||||
* 'current->thread.areg[]' to fill in the xtensa_gregset_t
|
||||
* structure.
|
||||
*
|
||||
* xtensa_gregset_t and 'struct pt_regs' are vastly different formats
|
||||
* of processor registers. Besides different ordering,
|
||||
* xtensa_gregset_t contains non-live register information that
|
||||
|
@ -191,9 +265,20 @@ unsigned long get_wchan(struct task_struct *p)
|
|||
*
|
||||
*/
|
||||
|
||||
void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
|
||||
struct task_struct *tsk)
|
||||
void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
|
||||
{
|
||||
unsigned long wb, ws, wm;
|
||||
int live, last;
|
||||
|
||||
wb = regs->windowbase;
|
||||
ws = regs->windowstart;
|
||||
wm = regs->wmask;
|
||||
ws = ((ws >> wb) | (ws << (WSBITS - wb))) & ((1 << WSBITS) - 1);
|
||||
|
||||
/* Don't leak any random bits. */
|
||||
|
||||
memset(elfregs, 0, sizeof (elfregs));
|
||||
|
||||
/* Note: PS.EXCM is not set while user task is running; its
|
||||
* being set in regs->ps is for exception handling convenience.
|
||||
*/
|
||||
|
@ -204,159 +289,18 @@ void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
|
|||
elfregs->lend = regs->lend;
|
||||
elfregs->lcount = regs->lcount;
|
||||
elfregs->sar = regs->sar;
|
||||
elfregs->windowstart = ws;
|
||||
|
||||
memcpy (elfregs->a, regs->areg, sizeof(elfregs->a));
|
||||
live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
|
||||
last = XCHAL_NUM_AREGS - (wm >> 4) * 4;
|
||||
memcpy(elfregs->a, regs->areg, live * 4);
|
||||
memcpy(elfregs->a + last, regs->areg + last, (wm >> 4) * 16);
|
||||
}
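The live/last arithmetic above reads regs->wmask as follows: bits 1 to 3 say whether the current call frame has 4, 8 or 12 live registers at the bottom of areg[] (16 if none of them is set), and the bits from 4 upward count 4-register caller frames spilled to the top of areg[]. A worked example, assuming a 64-entry register file; the sample wmask value is made up:

#include <stdio.h>

#define XCHAL_NUM_AREGS 64    /* assumption: typical configuration value */

int main(void)
{
	/* Current frame entered with call8 (bit 2), three caller frames spilled. */
	unsigned long wm = (3 << 4) | 4;

	int live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
	int last = XCHAL_NUM_AREGS - (int)(wm >> 4) * 4;

	printf("copy areg[0..%d) and areg[%d..%d)\n", live, last, XCHAL_NUM_AREGS);
	return 0;    /* prints: copy areg[0..8) and areg[52..64) */
}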
|
||||
|
||||
void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
|
||||
int dump_fpu(void)
|
||||
{
|
||||
do_copy_regs ((xtensa_gregset_t *)elfregs, regs, current);
|
||||
}
|
||||
|
||||
|
||||
/* The inverse of do_copy_regs(). No error or sanity checking. */
|
||||
|
||||
void do_restore_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
|
||||
struct task_struct *tsk)
|
||||
{
|
||||
const unsigned long ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;
|
||||
unsigned long ps;
|
||||
|
||||
/* Note: PS.EXCM is not set while user task is running; it
|
||||
* needs to be set in regs->ps is for exception handling convenience.
|
||||
*/
|
||||
|
||||
ps = (regs->ps & ~ps_mask) | (elfregs->ps & ps_mask) | (1<<PS_EXCM_BIT);
|
||||
regs->ps = ps;
|
||||
regs->pc = elfregs->pc;
|
||||
regs->lbeg = elfregs->lbeg;
|
||||
regs->lend = elfregs->lend;
|
||||
regs->lcount = elfregs->lcount;
|
||||
regs->sar = elfregs->sar;
|
||||
|
||||
memcpy (regs->areg, elfregs->a, sizeof(regs->areg));
|
||||
}
|
||||
|
||||
/*
|
||||
* do_save_fpregs() gathers information from 'struct pt_regs' and
|
||||
* 'current->thread' to fill in the elf_fpregset_t structure.
|
||||
*
|
||||
* Core files and ptrace use elf_fpregset_t.
|
||||
*/
|
||||
|
||||
void do_save_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
|
||||
struct task_struct *tsk)
|
||||
{
|
||||
#if XCHAL_HAVE_CP
|
||||
|
||||
extern unsigned char _xtensa_reginfo_tables[];
|
||||
extern unsigned _xtensa_reginfo_table_size;
|
||||
int i;
|
||||
unsigned long flags;
|
||||
|
||||
/* Before dumping coprocessor state from memory,
|
||||
* ensure any live coprocessor contents for this
|
||||
* task are first saved to memory:
|
||||
*/
|
||||
local_irq_save(flags);
|
||||
|
||||
for (i = 0; i < XCHAL_CP_MAX; i++) {
|
||||
if (tsk == coprocessor_info[i].owner) {
|
||||
enable_coprocessor(i);
|
||||
save_coprocessor_registers(
|
||||
tsk->thread.cp_save+coprocessor_info[i].offset,i);
|
||||
disable_coprocessor(i);
|
||||
}
|
||||
}
|
||||
|
||||
local_irq_restore(flags);
|
||||
|
||||
/* Now dump coprocessor & extra state: */
|
||||
memcpy((unsigned char*)fpregs,
|
||||
_xtensa_reginfo_tables, _xtensa_reginfo_table_size);
|
||||
memcpy((unsigned char*)fpregs + _xtensa_reginfo_table_size,
|
||||
tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* The inverse of do_save_fpregs().
|
||||
* Copies coprocessor and extra state from fpregs into regs and tsk->thread.
|
||||
* Returns 0 on success, non-zero if layout doesn't match.
|
||||
*/
|
||||
|
||||
int do_restore_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
|
||||
struct task_struct *tsk)
|
||||
{
|
||||
#if XCHAL_HAVE_CP
|
||||
|
||||
extern unsigned char _xtensa_reginfo_tables[];
|
||||
extern unsigned _xtensa_reginfo_table_size;
|
||||
int i;
|
||||
unsigned long flags;
|
||||
|
||||
/* Make sure save area layouts match.
|
||||
* FIXME: in the future we could allow restoring from
|
||||
* a different layout of the same registers, by comparing
|
||||
* fpregs' table with _xtensa_reginfo_tables and matching
|
||||
* entries and copying registers one at a time.
|
||||
* Not too sure yet whether that's very useful.
|
||||
*/
|
||||
|
||||
if( memcmp((unsigned char*)fpregs,
|
||||
_xtensa_reginfo_tables, _xtensa_reginfo_table_size) ) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Before restoring coprocessor state from memory,
|
||||
* ensure any live coprocessor contents for this
|
||||
* task are first invalidated.
|
||||
*/
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
for (i = 0; i < XCHAL_CP_MAX; i++) {
|
||||
if (tsk == coprocessor_info[i].owner) {
|
||||
enable_coprocessor(i);
|
||||
save_coprocessor_registers(
|
||||
tsk->thread.cp_save+coprocessor_info[i].offset,i);
|
||||
coprocessor_info[i].owner = 0;
|
||||
disable_coprocessor(i);
|
||||
}
|
||||
}
|
||||
|
||||
local_irq_restore(flags);
|
||||
|
||||
/* Now restore coprocessor & extra state: */
|
||||
|
||||
memcpy(tsk->thread.cp_save,
|
||||
(unsigned char*)fpregs + _xtensa_reginfo_table_size,
|
||||
XTENSA_CP_EXTRA_SIZE);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
/*
|
||||
* Fill in the CP structure for a core dump for a particular task.
|
||||
*/
|
||||
|
||||
int
|
||||
dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
|
||||
{
|
||||
return 0; /* no coprocessors active on this processor */
|
||||
}
|
||||
|
||||
/*
|
||||
* Fill in the CP structure for a core dump.
|
||||
* This includes any FPU coprocessor.
|
||||
* Here, we dump all coprocessors, and other ("extra") custom state.
|
||||
*
|
||||
* This function is called by elf_core_dump() in fs/binfmt_elf.c
|
||||
* (in which case 'regs' comes from calls to do_coredump, see signals.c).
|
||||
*/
|
||||
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
|
||||
{
|
||||
return dump_task_fpu(regs, current, r);
|
||||
}
|
||||
|
||||
asmlinkage
|
||||
long xtensa_clone(unsigned long clone_flags, unsigned long newsp,
|
||||
|
@ -370,8 +314,8 @@ long xtensa_clone(unsigned long clone_flags, unsigned long newsp,
|
|||
}
|
||||
|
||||
/*
|
||||
* * xtensa_execve() executes a new program.
|
||||
* */
|
||||
* xtensa_execve() executes a new program.
|
||||
*/
|
||||
|
||||
asmlinkage
|
||||
long xtensa_execve(char __user *name, char __user * __user *argv,
|
||||
|
@ -386,7 +330,6 @@ long xtensa_execve(char __user *name, char __user * __user *argv,
|
|||
error = PTR_ERR(filename);
|
||||
if (IS_ERR(filename))
|
||||
goto out;
|
||||
// FIXME: release coprocessor??
|
||||
error = do_execve(filename, argv, envp, regs);
|
||||
if (error == 0) {
|
||||
task_lock(current);
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Copyright (C) 2001 - 2005 Tensilica Inc.
|
||||
* Copyright (C) 2001 - 2007 Tensilica Inc.
|
||||
*
|
||||
* Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
|
||||
* Chris Zankel <chris@zankel.net>
|
||||
|
@ -28,14 +28,10 @@
|
|||
#include <asm/uaccess.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/elf.h>
|
||||
|
||||
#define TEST_KERNEL // verify kernel operations FIXME: remove
|
||||
|
||||
#include <asm/coprocessor.h>
|
||||
|
||||
/*
|
||||
* Called by kernel/ptrace.c when detaching..
|
||||
*
|
||||
* Make sure single step bits etc are not set.
|
||||
* Called by kernel/ptrace.c when detaching to disable single stepping.
|
||||
*/
|
||||
|
||||
void ptrace_disable(struct task_struct *child)
|
||||
|
@ -43,136 +39,233 @@ void ptrace_disable(struct task_struct *child)
|
|||
/* Nothing to do.. */
|
||||
}
|
||||
|
||||
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
int ptrace_getregs(struct task_struct *child, void __user *uregs)
|
||||
{
|
||||
int ret = -EPERM;
|
||||
struct pt_regs *regs = task_pt_regs(child);
|
||||
xtensa_gregset_t __user *gregset = uregs;
|
||||
unsigned long wb = regs->windowbase;
|
||||
unsigned long ws = regs->windowstart;
|
||||
unsigned long wm = regs->wmask;
|
||||
int ret = 0;
|
||||
int live, last;
|
||||
|
||||
switch (request) {
|
||||
case PTRACE_PEEKTEXT: /* read word at location addr. */
|
||||
case PTRACE_PEEKDATA:
|
||||
ret = generic_ptrace_peekdata(child, addr, data);
|
||||
goto out;
|
||||
if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
|
||||
return -EIO;
|
||||
|
||||
/* Read the word at location addr in the USER area. */
|
||||
/* Norm windowstart to a windowbase of 0. */
|
||||
|
||||
case PTRACE_PEEKUSR:
|
||||
{
|
||||
struct pt_regs *regs;
|
||||
unsigned long tmp;
|
||||
ws = ((ws>>wb) | (ws<<(WSBITS-wb))) & ((1<<WSBITS)-1);
|
||||
|
||||
regs = task_pt_regs(child);
|
||||
tmp = 0; /* Default return value. */
|
||||
ret |= __put_user(regs->pc, &gregset->pc);
|
||||
ret |= __put_user(regs->ps & ~(1 << PS_EXCM_BIT), &gregset->ps);
|
||||
ret |= __put_user(regs->lbeg, &gregset->lbeg);
|
||||
ret |= __put_user(regs->lend, &gregset->lend);
|
||||
ret |= __put_user(regs->lcount, &gregset->lcount);
|
||||
ret |= __put_user(ws, &gregset->windowstart);
|
||||
|
||||
switch(addr) {
|
||||
live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
|
||||
last = XCHAL_NUM_AREGS - (wm >> 4) * 4;
|
||||
ret |= __copy_to_user(gregset->a, regs->areg, live * 4);
|
||||
ret |= __copy_to_user(gregset->a + last, regs->areg + last, (wm>>4)*16);
|
||||
|
||||
return ret ? -EFAULT : 0;
|
||||
}
|
||||
|
||||
int ptrace_setregs(struct task_struct *child, void __user *uregs)
|
||||
{
|
||||
struct pt_regs *regs = task_pt_regs(child);
|
||||
xtensa_gregset_t *gregset = uregs;
|
||||
const unsigned long ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;
|
||||
unsigned long wm = regs->wmask;
|
||||
unsigned long ps;
|
||||
int ret = 0;
|
||||
int live, last;
|
||||
|
||||
if (!access_ok(VERIFY_WRITE, uregs, sizeof(xtensa_gregset_t)))
|
||||
return -EIO;
|
||||
|
||||
ret |= __get_user(regs->pc, &gregset->pc);
|
||||
ret |= __get_user(ps, &gregset->ps);
|
||||
ret |= __get_user(regs->lbeg, &gregset->lbeg);
|
||||
ret |= __get_user(regs->lend, &gregset->lend);
|
||||
ret |= __get_user(regs->lcount, &gregset->lcount);
|
||||
|
||||
regs->ps = (regs->ps & ~ps_mask) | (ps & ps_mask) | (1 << PS_EXCM_BIT);
|
||||
|
||||
live = (wm & 2) ? 4 : (wm & 4) ? 8 : (wm & 8) ? 12 : 16;
|
||||
last = XCHAL_NUM_AREGS - (wm >> 4) * 4;
|
||||
ret |= __copy_from_user(regs->areg, gregset->a, live * 4);
|
||||
ret |= __copy_from_user(regs->areg+last, gregset->a+last, (wm>>4)*16);
|
||||
|
||||
return ret ? -EFAULT : 0;
|
||||
}
|
||||
|
||||
|
||||
int ptrace_getxregs(struct task_struct *child, void __user *uregs)
|
||||
{
|
||||
struct pt_regs *regs = task_pt_regs(child);
|
||||
struct thread_info *ti = task_thread_info(child);
|
||||
elf_xtregs_t __user *xtregs = uregs;
|
||||
int ret = 0;
|
||||
|
||||
if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t)))
|
||||
return -EIO;
|
||||
|
||||
#if XTENSA_HAVE_COPROCESSORS
|
||||
/* Flush all coprocessor registers to memory. */
|
||||
coprocessor_flush_all(ti);
|
||||
ret |= __copy_to_user(&xtregs->cp0, &ti->xtregs_cp,
|
||||
sizeof(xtregs_coprocessor_t));
|
||||
#endif
|
||||
ret |= __copy_to_user(&xtregs->opt, &regs->xtregs_opt,
|
||||
sizeof(xtregs->opt));
|
||||
ret |= __copy_to_user(&xtregs->user,&ti->xtregs_user,
|
||||
sizeof(xtregs->user));
|
||||
|
||||
return ret ? -EFAULT : 0;
|
||||
}
|
||||
|
||||
int ptrace_setxregs(struct task_struct *child, void __user *uregs)
|
||||
{
|
||||
struct thread_info *ti = task_thread_info(child);
|
||||
struct pt_regs *regs = task_pt_regs(child);
|
||||
elf_xtregs_t *xtregs = uregs;
|
||||
int ret = 0;
|
||||
|
||||
#if XTENSA_HAVE_COPROCESSORS
|
||||
/* Flush all coprocessors before we overwrite them. */
|
||||
coprocessor_flush_all(ti);
|
||||
coprocessor_release_all(ti);
|
||||
|
||||
ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0,
|
||||
sizeof(xtregs_coprocessor_t));
|
||||
#endif
|
||||
ret |= __copy_from_user(&regs->xtregs_opt, &xtregs->opt,
|
||||
sizeof(xtregs->opt));
|
||||
ret |= __copy_from_user(&ti->xtregs_user, &xtregs->user,
|
||||
sizeof(xtregs->user));
|
||||
|
||||
return ret ? -EFAULT : 0;
|
||||
}
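From user space, the new requests look like any other ptrace register call. A hedged sketch of a debugger reading the extended registers; it assumes it is built against the patched Xtensa kernel headers, which provide PTRACE_GETXTREGS and the variant-specific elf_xtregs_t layout.

#include <stdio.h>
#include <sys/types.h>
#include <sys/ptrace.h>
#include <asm/ptrace.h>      /* xtensa-specific: PTRACE_GETXTREGS (assumption) */
#include <asm/elf.h>         /* elf_xtregs_t for this processor variant (assumption) */

int read_xtregs(pid_t pid, elf_xtregs_t *xtregs)
{
	/* The tracee must already be stopped under ptrace (e.g. after waitpid). */
	if (ptrace(PTRACE_GETXTREGS, pid, 0, xtregs) == -1) {
		perror("PTRACE_GETXTREGS");
		return -1;
	}
	return 0;
}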
|
||||
|
||||
int ptrace_peekusr(struct task_struct *child, long regno, long __user *ret)
|
||||
{
|
||||
struct pt_regs *regs;
|
||||
unsigned long tmp;
|
||||
|
||||
regs = task_pt_regs(child);
|
||||
tmp = 0; /* Default return value. */
|
||||
|
||||
switch(regno) {
|
||||
|
||||
case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
|
||||
{
|
||||
int ar = addr - REG_AR_BASE - regs->windowbase * 4;
|
||||
ar &= (XCHAL_NUM_AREGS - 1);
|
||||
if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
|
||||
tmp = regs->areg[ar];
|
||||
else
|
||||
ret = -EIO;
|
||||
tmp = regs->areg[regno - REG_AR_BASE];
|
||||
break;
|
||||
}
|
||||
|
||||
case REG_A_BASE ... REG_A_BASE + 15:
|
||||
tmp = regs->areg[addr - REG_A_BASE];
|
||||
tmp = regs->areg[regno - REG_A_BASE];
|
||||
break;
|
||||
|
||||
case REG_PC:
|
||||
tmp = regs->pc;
|
||||
break;
|
||||
|
||||
case REG_PS:
|
||||
/* Note: PS.EXCM is not set while user task is running;
|
||||
* its being set in regs is for exception handling
|
||||
* convenience. */
|
||||
tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
|
||||
break;
|
||||
|
||||
case REG_WB:
|
||||
tmp = regs->windowbase;
|
||||
break;
|
||||
break; /* tmp = 0 */
|
||||
|
||||
case REG_WS:
|
||||
tmp = regs->windowstart;
|
||||
{
|
||||
unsigned long wb = regs->windowbase;
|
||||
unsigned long ws = regs->windowstart;
|
||||
tmp = ((ws>>wb) | (ws<<(WSBITS-wb))) & ((1<<WSBITS)-1);
|
||||
break;
|
||||
}
|
||||
case REG_LBEG:
|
||||
tmp = regs->lbeg;
|
||||
break;
|
||||
|
||||
case REG_LEND:
|
||||
tmp = regs->lend;
|
||||
break;
|
||||
|
||||
case REG_LCOUNT:
|
||||
tmp = regs->lcount;
|
||||
break;
|
||||
|
||||
case REG_SAR:
|
||||
tmp = regs->sar;
|
||||
break;
|
||||
case REG_DEPC:
|
||||
tmp = regs->depc;
|
||||
break;
|
||||
case REG_EXCCAUSE:
|
||||
tmp = regs->exccause;
|
||||
break;
|
||||
case REG_EXCVADDR:
|
||||
tmp = regs->excvaddr;
|
||||
break;
|
||||
|
||||
case SYSCALL_NR:
|
||||
tmp = regs->syscall;
|
||||
break;
|
||||
default:
|
||||
tmp = 0;
|
||||
ret = -EIO;
|
||||
goto out;
|
||||
}
|
||||
ret = put_user(tmp, (unsigned long *) data);
|
||||
goto out;
|
||||
}
|
||||
|
||||
case PTRACE_POKETEXT: /* write the word at location addr. */
|
||||
default:
|
||||
return -EIO;
|
||||
}
|
||||
return put_user(tmp, ret);
|
||||
}
|
||||
|
||||
int ptrace_pokeusr(struct task_struct *child, long regno, long val)
|
||||
{
|
||||
struct pt_regs *regs;
|
||||
regs = task_pt_regs(child);
|
||||
|
||||
switch (regno) {
|
||||
case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
|
||||
regs->areg[regno - REG_AR_BASE] = val;
|
||||
break;
|
||||
|
||||
case REG_A_BASE ... REG_A_BASE + 15:
|
||||
regs->areg[regno - REG_A_BASE] = val;
|
||||
break;
|
||||
|
||||
case REG_PC:
|
||||
regs->pc = val;
|
||||
break;
|
||||
|
||||
case SYSCALL_NR:
|
||||
regs->syscall = val;
|
||||
break;
|
||||
|
||||
default:
|
||||
return -EIO;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
{
|
||||
int ret = -EPERM;
|
||||
|
||||
switch (request) {
|
||||
case PTRACE_PEEKTEXT: /* read word at location addr. */
|
||||
case PTRACE_PEEKDATA:
|
||||
ret = generic_ptrace_peekdata(child, addr, data);
|
||||
break;
|
||||
|
||||
case PTRACE_PEEKUSR: /* read register specified by addr. */
|
||||
ret = ptrace_peekusr(child, addr, (void __user *) data);
|
||||
break;
|
||||
|
||||
case PTRACE_POKETEXT: /* write the word at location addr. */
|
||||
case PTRACE_POKEDATA:
|
||||
ret = generic_ptrace_pokedata(child, addr, data);
|
||||
goto out;
|
||||
|
||||
case PTRACE_POKEUSR:
|
||||
{
|
||||
struct pt_regs *regs;
|
||||
regs = task_pt_regs(child);
|
||||
|
||||
switch (addr) {
|
||||
case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
|
||||
{
|
||||
int ar = addr - REG_AR_BASE - regs->windowbase * 4;
|
||||
if (ar < 16 && ar + (regs->wmask >> 4) * 4 >= 0)
|
||||
regs->areg[ar & (XCHAL_NUM_AREGS - 1)] = data;
|
||||
else
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
case REG_A_BASE ... REG_A_BASE + 15:
|
||||
regs->areg[addr - REG_A_BASE] = data;
|
||||
break;
|
||||
case REG_PC:
|
||||
regs->pc = data;
|
||||
break;
|
||||
case SYSCALL_NR:
|
||||
regs->syscall = data;
|
||||
break;
|
||||
#ifdef TEST_KERNEL
|
||||
case REG_WB:
|
||||
regs->windowbase = data;
|
||||
break;
|
||||
case REG_WS:
|
||||
regs->windowstart = data;
|
||||
break;
|
||||
#endif
|
||||
|
||||
default:
|
||||
/* The rest are not allowed. */
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case PTRACE_POKEUSR: /* write register specified by addr. */
|
||||
ret = ptrace_pokeusr(child, addr, data);
|
||||
break;
|
||||
|
||||
/* continue and stop at next (return from) syscall */
|
||||
|
||||
case PTRACE_SYSCALL:
|
||||
case PTRACE_CONT: /* restart after signal. */
|
||||
{
|
||||
|
@ -217,98 +310,26 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
|||
break;
|
||||
|
||||
case PTRACE_GETREGS:
|
||||
{
|
||||
/* 'data' points to user memory in which to write.
|
||||
* Mainly due to the non-live register values, we
|
||||
* reformat the register values into something more
|
||||
* standard. For convenience, we use the handy
|
||||
* elf_gregset_t format. */
|
||||
|
||||
xtensa_gregset_t format;
|
||||
struct pt_regs *regs = task_pt_regs(child);
|
||||
|
||||
do_copy_regs (&format, regs, child);
|
||||
|
||||
/* Now, copy to user space nice and easy... */
|
||||
ret = 0;
|
||||
if (copy_to_user((void *)data, &format, sizeof(elf_gregset_t)))
|
||||
ret = -EFAULT;
|
||||
ret = ptrace_getregs(child, (void __user *) data);
|
||||
break;
|
||||
}
|
||||
|
||||
case PTRACE_SETREGS:
|
||||
{
|
||||
/* 'data' points to user memory that contains the new
|
||||
* values in the elf_gregset_t format. */
|
||||
|
||||
xtensa_gregset_t format;
|
||||
struct pt_regs *regs = task_pt_regs(child);
|
||||
|
||||
if (copy_from_user(&format,(void *)data,sizeof(elf_gregset_t))){
|
||||
ret = -EFAULT;
|
||||
break;
|
||||
}
|
||||
|
||||
/* FIXME: Perhaps we want some sanity checks on
|
||||
* these user-space values? See ARM version. Are
|
||||
* debuggers a security concern? */
|
||||
|
||||
do_restore_regs (&format, regs, child);
|
||||
|
||||
ret = 0;
|
||||
ret = ptrace_setregs(child, (void __user *) data);
|
||||
break;
|
||||
}
|
||||
|
||||
case PTRACE_GETFPREGS:
|
||||
{
|
||||
/* 'data' points to user memory in which to write.
|
||||
* For convenience, we use the handy
|
||||
* elf_fpregset_t format. */
|
||||
|
||||
elf_fpregset_t fpregs;
|
||||
struct pt_regs *regs = task_pt_regs(child);
|
||||
|
||||
do_save_fpregs (&fpregs, regs, child);
|
||||
|
||||
/* Now, copy to user space nice and easy... */
|
||||
ret = 0;
|
||||
if (copy_to_user((void *)data, &fpregs, sizeof(elf_fpregset_t)))
|
||||
ret = -EFAULT;
|
||||
|
||||
case PTRACE_GETXTREGS:
|
||||
ret = ptrace_getxregs(child, (void __user *) data);
|
||||
break;
|
||||
}
|
||||
|
||||
case PTRACE_SETFPREGS:
|
||||
{
|
||||
/* 'data' points to user memory that contains the new
|
||||
* values in the elf_fpregset_t format.
|
||||
*/
|
||||
elf_fpregset_t fpregs;
|
||||
struct pt_regs *regs = task_pt_regs(child);
|
||||
|
||||
ret = 0;
|
||||
if (copy_from_user(&fpregs, (void *)data, sizeof(elf_fpregset_t))) {
|
||||
ret = -EFAULT;
|
||||
break;
|
||||
}
|
||||
|
||||
if (do_restore_fpregs (&fpregs, regs, child))
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
case PTRACE_GETFPREGSIZE:
|
||||
/* 'data' points to 'unsigned long' set to the size
|
||||
* of elf_fpregset_t
|
||||
*/
|
||||
ret = put_user(sizeof(elf_fpregset_t), (unsigned long *) data);
|
||||
case PTRACE_SETXTREGS:
|
||||
ret = ptrace_setxregs(child, (void __user *) data);
|
||||
break;
|
||||
|
||||
default:
|
||||
ret = ptrace_request(child, request, addr, data);
|
||||
goto out;
|
||||
break;
|
||||
}
|
||||
out:
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -35,13 +35,17 @@ asmlinkage int do_signal(struct pt_regs *regs, sigset_t *oldset);
|
|||
|
||||
extern struct task_struct *coproc_owners[];
|
||||
|
||||
extern void release_all_cp (struct task_struct *);
|
||||
|
||||
struct rt_sigframe
|
||||
{
|
||||
struct siginfo info;
|
||||
struct ucontext uc;
|
||||
cp_state_t cpstate;
|
||||
struct {
|
||||
xtregs_opt_t opt;
|
||||
xtregs_user_t user;
|
||||
#if XTENSA_HAVE_COPROCESSORS
|
||||
xtregs_coprocessor_t cp;
|
||||
#endif
|
||||
} xtregs;
|
||||
unsigned char retcode[6];
|
||||
unsigned int window[4];
|
||||
};
|
||||
|
@@ -132,9 +136,10 @@ errout:
 */

static int
setup_sigcontext(struct sigcontext __user *sc, cp_state_t *cpstate,
		 struct pt_regs *regs)
setup_sigcontext(struct rt_sigframe __user *frame, struct pt_regs *regs)
{
	struct sigcontext __user *sc = &frame->uc.uc_mcontext;
	struct thread_info *ti = current_thread_info();
	int err = 0;

#define COPY(x)	err |= __put_user(regs->x, &sc->sc_##x)
@@ -148,21 +153,32 @@ setup_sigcontext(struct sigcontext __user *sc, cp_state_t *cpstate,

err |= flush_window_regs_user(regs);
err |= __copy_to_user (sc->sc_a, regs->areg, 16 * 4);
err |= __put_user(0, &sc->sc_xtregs);

// err |= __copy_to_user (sc->sc_a, regs->areg, XCHAL_NUM_AREGS * 4)
if (err)
return err;

#if XCHAL_HAVE_CP
# error Coprocessors unsupported
err |= save_cpextra(cpstate);
err |= __put_user(err ? NULL : cpstate, &sc->sc_cpstate);
#if XTENSA_HAVE_COPROCESSORS
coprocessor_flush_all(ti);
coprocessor_release_all(ti);
err |= __copy_to_user(&frame->xtregs.cp, &ti->xtregs_cp,
		      sizeof (frame->xtregs.cp));
#endif
err |= __copy_to_user(&frame->xtregs.opt, &regs->xtregs_opt,
		      sizeof (xtregs_opt_t));
err |= __copy_to_user(&frame->xtregs.user, &ti->xtregs_user,
		      sizeof (xtregs_user_t));

err |= __put_user(err ? NULL : &frame->xtregs, &sc->sc_xtregs);

return err;
}

static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
restore_sigcontext(struct pt_regs *regs, struct rt_sigframe __user *frame)
{
	struct sigcontext __user *sc = &frame->uc.uc_mcontext;
	struct thread_info *ti = current_thread_info();
	unsigned int err = 0;
	unsigned long ps;
@@ -180,6 +196,8 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
regs->windowbase = 0;
regs->windowstart = 1;

regs->syscall = -1;		/* disable syscall checks */

/* For PS, restore only PS.CALLINC.
 * Assume that all other bits are either the same as for the signal
 * handler, or the user mode value doesn't matter (e.g. PS.OWB).
@@ -195,8 +213,9 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)

err |= __copy_from_user(regs->areg, sc->sc_a, 16 * 4);

#if XCHAL_HAVE_CP
# error Coprocessors unsupported
if (err)
return err;

/* The signal handler may have used coprocessors in which
 * case they are still enabled.  We disable them to force a
 * reloading of the original task's CP state by the lazy
@@ -204,20 +223,20 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
 * Also, we essentially discard any coprocessor state that the
 * signal handler created. */

if (!err) {
struct task_struct *tsk = current;
release_all_cp(tsk);
err |= __copy_from_user(tsk->thread.cpextra, sc->sc_cpstate,
			XTENSA_CP_EXTRA_SIZE);
}
#if XTENSA_HAVE_COPROCESSORS
coprocessor_release_all(ti);
err |= __copy_from_user(&ti->xtregs_cp, &frame->xtregs.cp,
			sizeof (frame->xtregs.cp));
#endif
err |= __copy_from_user(&ti->xtregs_user, &frame->xtregs.user,
			sizeof (xtregs_user_t));
err |= __copy_from_user(&regs->xtregs_opt, &frame->xtregs.opt,
			sizeof (xtregs_opt_t));

regs->syscall = -1;		/* disable syscall checks */
return err;
}


/*
 * Do a signal return; undo the signal stack.
 */
@@ -246,7 +265,7 @@ asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);

if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
if (restore_sigcontext(regs, frame))
goto badframe;

ret = regs->areg[2];

@@ -359,7 +378,7 @@ static void setup_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
err |= __put_user(sas_ss_flags(regs->areg[1]),
		  &frame->uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= setup_sigcontext(&frame->uc.uc_mcontext, &frame->cpstate, regs);
err |= setup_sigcontext(frame, regs);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

/* Create sys_rt_sigreturn syscall in stack frame */
@@ -118,28 +118,28 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = {
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE,	0,	   do_page_fault },
{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE,	0,	   do_page_fault },
/* XCCHAL_EXCCAUSE_FLOATING_POINT unhandled */
#if (XCHAL_CP_MASK & 1)
#if XTENSA_HAVE_COPROCESSOR(0)
COPROCESSOR(0),
#endif
#if (XCHAL_CP_MASK & 2)
#if XTENSA_HAVE_COPROCESSOR(1)
COPROCESSOR(1),
#endif
#if (XCHAL_CP_MASK & 4)
#if XTENSA_HAVE_COPROCESSOR(2)
COPROCESSOR(2),
#endif
#if (XCHAL_CP_MASK & 8)
#if XTENSA_HAVE_COPROCESSOR(3)
COPROCESSOR(3),
#endif
#if (XCHAL_CP_MASK & 16)
#if XTENSA_HAVE_COPROCESSOR(4)
COPROCESSOR(4),
#endif
#if (XCHAL_CP_MASK & 32)
#if XTENSA_HAVE_COPROCESSOR(5)
COPROCESSOR(5),
#endif
#if (XCHAL_CP_MASK & 64)
#if XTENSA_HAVE_COPROCESSOR(6)
COPROCESSOR(6),
#endif
#if (XCHAL_CP_MASK & 128)
#if XTENSA_HAVE_COPROCESSOR(7)
COPROCESSOR(7),
#endif
{ EXCCAUSE_MAPPED_DEBUG,		0,		do_debug },
@@ -5,81 +5,168 @@
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 - 2005 Tensilica Inc.
 * Copyright (C) 2003 - 2007 Tensilica Inc.
 */

#ifndef _XTENSA_COPROCESSOR_H
#define _XTENSA_COPROCESSOR_H

#include <asm/variant/core.h>
#include <linux/stringify.h>
#include <asm/variant/tie.h>
#include <asm/types.h>

#if !XCHAL_HAVE_CP
#ifdef __ASSEMBLY__
# include <asm/variant/tie-asm.h>

#define XTENSA_CP_EXTRA_OFFSET	0
#define XTENSA_CP_EXTRA_ALIGN	1	/* must be a power of 2 */
#define XTENSA_CP_EXTRA_SIZE	0
.macro	xchal_sa_start  a b
	.set .Lxchal_pofs_, 0
	.set .Lxchal_ofs_, 0
.endm

#else
.macro	xchal_sa_align  ptr minofs maxofs ofsalign totalign
	.set	.Lxchal_ofs_, .Lxchal_ofs_ + .Lxchal_pofs_ + \totalign - 1
	.set	.Lxchal_ofs_, (.Lxchal_ofs_ & -\totalign) - .Lxchal_pofs_
.endm

#define XTOFS(last_start,last_size,align) \
	((last_start+last_size+align-1) & -align)
#define _SELECT	(  XTHAL_SAS_TIE | XTHAL_SAS_OPT \
		 | XTHAL_SAS_CC \
		 | XTHAL_SAS_CALR | XTHAL_SAS_CALE | XTHAL_SAS_GLOB )

#define XTENSA_CP_EXTRA_OFFSET	0
#define XTENSA_CP_EXTRA_ALIGN	XCHAL_EXTRA_SA_ALIGN
.macro save_xtregs_opt ptr clb at1 at2 at3 at4 offset
	.if XTREGS_OPT_SIZE > 0
		addi	\clb, \ptr, \offset
		xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
	.endif
.endm

#define XTENSA_CPE_CP0_OFFSET	\
	XTOFS(XTENSA_CP_EXTRA_OFFSET, XCHAL_EXTRA_SA_SIZE, XCHAL_CP0_SA_ALIGN)
#define XTENSA_CPE_CP1_OFFSET	\
	XTOFS(XTENSA_CPE_CP0_OFFSET, XCHAL_CP0_SA_SIZE, XCHAL_CP1_SA_ALIGN)
#define XTENSA_CPE_CP2_OFFSET	\
	XTOFS(XTENSA_CPE_CP1_OFFSET, XCHAL_CP1_SA_SIZE, XCHAL_CP2_SA_ALIGN)
#define XTENSA_CPE_CP3_OFFSET	\
	XTOFS(XTENSA_CPE_CP2_OFFSET, XCHAL_CP2_SA_SIZE, XCHAL_CP3_SA_ALIGN)
#define XTENSA_CPE_CP4_OFFSET	\
	XTOFS(XTENSA_CPE_CP3_OFFSET, XCHAL_CP3_SA_SIZE, XCHAL_CP4_SA_ALIGN)
#define XTENSA_CPE_CP5_OFFSET	\
	XTOFS(XTENSA_CPE_CP4_OFFSET, XCHAL_CP4_SA_SIZE, XCHAL_CP5_SA_ALIGN)
#define XTENSA_CPE_CP6_OFFSET	\
	XTOFS(XTENSA_CPE_CP5_OFFSET, XCHAL_CP5_SA_SIZE, XCHAL_CP6_SA_ALIGN)
#define XTENSA_CPE_CP7_OFFSET	\
	XTOFS(XTENSA_CPE_CP6_OFFSET, XCHAL_CP6_SA_SIZE, XCHAL_CP7_SA_ALIGN)
#define XTENSA_CP_EXTRA_SIZE	\
	XTOFS(XTENSA_CPE_CP7_OFFSET, XCHAL_CP7_SA_SIZE, 16)
.macro load_xtregs_opt ptr clb at1 at2 at3 at4 offset
	.if XTREGS_OPT_SIZE > 0
		addi	\clb, \ptr, \offset
		xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
	.endif
.endm
#undef _SELECT

#define _SELECT	(  XTHAL_SAS_TIE | XTHAL_SAS_OPT \
		 | XTHAL_SAS_NOCC \
		 | XTHAL_SAS_CALR | XTHAL_SAS_CALE | XTHAL_SAS_GLOB )

.macro save_xtregs_user ptr clb at1 at2 at3 at4 offset
	.if XTREGS_USER_SIZE > 0
		addi	\clb, \ptr, \offset
		xchal_ncp_store \clb \at1 \at2 \at3 \at4 select=_SELECT
	.endif
.endm

.macro load_xtregs_user ptr clb at1 at2 at3 at4 offset
	.if XTREGS_USER_SIZE > 0
		addi	\clb, \ptr, \offset
		xchal_ncp_load \clb \at1 \at2 \at3 \at4 select=_SELECT
	.endif
.endm
#undef _SELECT


#endif	/* __ASSEMBLY__ */

#if XCHAL_CP_NUM > 0
# ifndef __ASSEMBLY__
/*
 * Tasks that own contents of (last user) each coprocessor.
 * Entries are 0 for not-owned or non-existent coprocessors.
 * Note: The size of this structure is fixed to 8 bytes in entry.S
 * XTENSA_HAVE_COPROCESSOR(x) returns 1 if coprocessor x is configured.
 *
 * XTENSA_HAVE_IO_PORT(x) returns 1 if io-port x is configured.
 *
 */
typedef struct {
	struct task_struct *owner;	/* owner */
	int offset;			/* offset in cpextra space. */
} coprocessor_info_t;
# else
# define COPROCESSOR_INFO_OWNER 0
# define COPROCESSOR_INFO_OFFSET 4
# define COPROCESSOR_INFO_SIZE 8
# endif
#endif
#endif	/* XCHAL_HAVE_CP */

#define XTENSA_HAVE_COPROCESSOR(x)	\
	((XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK) & (1 << (x)))
#define XTENSA_HAVE_COPROCESSORS	\
	(XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK)
#define XTENSA_HAVE_IO_PORT(x)	\
	(XCHAL_CP_PORT_MASK & (1 << (x)))
#define XTENSA_HAVE_IO_PORTS	\
	XCHAL_CP_PORT_MASK
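Note (illustrative, not part of the patch): the two masks above distinguish real coprocessors, which carry lazily switched register state, from I/O ports, which do not. A minimal stand-alone C check, assuming a hypothetical core where XCHAL_CP_MASK is 0x81 and XCHAL_CP_PORT_MASK is 0x80 (so CP 0 is a real coprocessor and CP 7 is only an I/O port, as in the XTIOP example from the variant tie.h further below):

/* Hypothetical configuration values, for illustration only. */
#include <stdio.h>

#define XCHAL_CP_MASK		0x81	/* CPs 0 and 7 exist */
#define XCHAL_CP_PORT_MASK	0x80	/* CP 7 is only an I/O port */

#define XTENSA_HAVE_COPROCESSOR(x) \
	((XCHAL_CP_MASK ^ XCHAL_CP_PORT_MASK) & (1 << (x)))
#define XTENSA_HAVE_IO_PORT(x) \
	(XCHAL_CP_PORT_MASK & (1 << (x)))

int main(void)
{
	/* CP 0 has register state to switch lazily ... */
	printf("cp0: coprocessor=%d io_port=%d\n",
	       !!XTENSA_HAVE_COPROCESSOR(0), !!XTENSA_HAVE_IO_PORT(0));
	/* ... while CP 7 is a pure port with nothing to save. */
	printf("cp7: coprocessor=%d io_port=%d\n",
	       !!XTENSA_HAVE_COPROCESSOR(7), !!XTENSA_HAVE_IO_PORT(7));
	return 0;	/* prints coprocessor=1/io_port=0, then 0/1 */
}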
#ifndef __ASSEMBLY__
# if XCHAL_CP_NUM > 0
struct task_struct;
extern void release_coprocessors (struct task_struct*);
extern void save_coprocessor_registers(void*, int);
# else
# define release_coprocessors(task)
# endif

typedef unsigned char cp_state_t[XTENSA_CP_EXTRA_SIZE]
	__attribute__ ((aligned (XTENSA_CP_EXTRA_ALIGN)));

#if XCHAL_HAVE_CP

#define RSR_CPENABLE(x)	do {						  \
	__asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \
	} while(0);
#define WSR_CPENABLE(x)	do {						  \
	__asm__ __volatile__("wsr %0," __stringify(CPENABLE) "; rsync"	  \
			     :: "a" (x)); \
	} while(0);

#endif /* XCHAL_HAVE_CP */


/*
 * Additional registers.
 * We define three types of additional registers:
 *  ext: extra registers that are used by the compiler
 *  cpn: optional registers that can be used by a user application
 *  cpX: coprocessor registers that can only be used if the corresponding
 *       CPENABLE bit is set.
 */

#define XCHAL_SA_REG(list,compiler,x,type,y,name,z,align,size,...)	\
	__REG ## list (compiler, type, name, size, align)

#define __REG0(compiler,t,name,s,a)	__REG0_ ## compiler (name)
#define __REG1(compiler,t,name,s,a)	__REG1_ ## compiler (name)
#define __REG2(c,type,...)		__REG2_ ## type (__VA_ARGS__)

#define __REG0_0(name)
#define __REG0_1(name)		__u32 name;
#define __REG1_0(name)		__u32 name;
#define __REG1_1(name)
#define __REG2_0(n,s,a)		__u32 name;
#define __REG2_1(n,s,a)		unsigned char n[s] __attribute__ ((aligned(a)));
#define __REG2_2(n,s,a)		unsigned char n[s] __attribute__ ((aligned(a)));

typedef struct { XCHAL_NCP_SA_LIST(0) } xtregs_opt_t
	__attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
typedef struct { XCHAL_NCP_SA_LIST(1) } xtregs_user_t
	__attribute__ ((aligned (XCHAL_NCP_SA_ALIGN)));
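Note (illustrative, not part of the patch): the __REG helpers above are an X-macro scheme - the same save-area list expands to different struct members depending on the list selector, so compiler-used ("opt") and purely user-visible ("user") state end up in separate structures. A minimal stand-alone sketch with a hypothetical two-entry list (real lists come from the variant's tie.h):

/* Mirrors the helpers above; the DEMO list and its entries are hypothetical. */
typedef unsigned int __u32;	/* stand-in for the kernel type */

#define XCHAL_SA_REG(list,compiler,x,type,y,name,z,align,size,...) \
	__REG ## list (compiler, type, name, size, align)

#define __REG0(compiler,t,name,s,a)	__REG0_ ## compiler (name)
#define __REG1(compiler,t,name,s,a)	__REG1_ ## compiler (name)

#define __REG0_0(name)
#define __REG0_1(name)	__u32 name;
#define __REG1_0(name)	__u32 name;
#define __REG1_1(name)

/* Hypothetical list: 'threadptr' is compiler-used (1), 'scompare1' is not (0). */
#define DEMO_NCP_SA_LIST(s) \
	XCHAL_SA_REG(s,1,0,0,0,threadptr,0,4,4,0) \
	XCHAL_SA_REG(s,0,0,0,0,scompare1,0,4,4,0)

typedef struct { DEMO_NCP_SA_LIST(0) } demo_opt_t;	/* -> { __u32 threadptr; } */
typedef struct { DEMO_NCP_SA_LIST(1) } demo_user_t;	/* -> { __u32 scompare1; } */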
#if XTENSA_HAVE_COPROCESSORS

typedef struct { XCHAL_CP0_SA_LIST(2) } xtregs_cp0_t
	__attribute__ ((aligned (XCHAL_CP0_SA_ALIGN)));
typedef struct { XCHAL_CP1_SA_LIST(2) } xtregs_cp1_t
	__attribute__ ((aligned (XCHAL_CP1_SA_ALIGN)));
typedef struct { XCHAL_CP2_SA_LIST(2) } xtregs_cp2_t
	__attribute__ ((aligned (XCHAL_CP2_SA_ALIGN)));
typedef struct { XCHAL_CP3_SA_LIST(2) } xtregs_cp3_t
	__attribute__ ((aligned (XCHAL_CP3_SA_ALIGN)));
typedef struct { XCHAL_CP4_SA_LIST(2) } xtregs_cp4_t
	__attribute__ ((aligned (XCHAL_CP4_SA_ALIGN)));
typedef struct { XCHAL_CP5_SA_LIST(2) } xtregs_cp5_t
	__attribute__ ((aligned (XCHAL_CP5_SA_ALIGN)));
typedef struct { XCHAL_CP6_SA_LIST(2) } xtregs_cp6_t
	__attribute__ ((aligned (XCHAL_CP6_SA_ALIGN)));
typedef struct { XCHAL_CP7_SA_LIST(2) } xtregs_cp7_t
	__attribute__ ((aligned (XCHAL_CP7_SA_ALIGN)));

extern struct thread_info* coprocessor_owner[XCHAL_CP_MAX];
extern void coprocessor_save(void*, int);
extern void coprocessor_load(void*, int);
extern void coprocessor_flush(struct thread_info*, int);
extern void coprocessor_restore(struct thread_info*, int);

extern void coprocessor_release_all(struct thread_info*);
extern void coprocessor_flush_all(struct thread_info*);

static inline void coprocessor_clear_cpenable(void)
{
	unsigned long i = 0;
	WSR_CPENABLE(i);
}

#endif	/* XTENSA_HAVE_COPROCESSORS */
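Note (illustrative, not part of the patch): coprocessor_owner[], coprocessor_save/load and the per-thread cpenable bookkeeping are the building blocks of the 'lazy' switch from the commit message; the real fast path is in assembly elsewhere in the patch. A self-contained user-space simulation of the idea, for a hypothetical two-coprocessor setup:

/* Simulation only: models what servicing a "coprocessor disabled"
 * exception amounts to; no kernel interfaces are used. */
#include <stdio.h>

#define NCP 2				/* hypothetical: two coprocessors */

struct thread {
	const char *name;
	unsigned long cpenable;		/* CPs this thread may touch right now */
	int cpregs[NCP];		/* stand-in for each CP's saved registers */
};

static struct thread *owner[NCP];	/* last user of each CP (like coprocessor_owner[]) */
static int hw_cpregs[NCP];		/* stand-in for the live CP register file */

/* Called when a thread touches CP 'cp' while its enable bit is clear. */
static void lazy_cp_switch(struct thread *curr, int cp)
{
	if (owner[cp] && owner[cp] != curr) {
		owner[cp]->cpregs[cp] = hw_cpregs[cp];	/* save previous owner */
		owner[cp]->cpenable &= ~(1UL << cp);
	}
	hw_cpregs[cp] = curr->cpregs[cp];		/* load the new owner */
	owner[cp] = curr;
	curr->cpenable |= 1UL << cp;			/* retried access now succeeds */
}

int main(void)
{
	struct thread a = { "A" }, b = { "B" };

	a.cpregs[0] = 11;
	lazy_cp_switch(&a, 0);		/* A faults on CP0 and becomes its owner */
	hw_cpregs[0] = 42;		/* A computes something in CP0 */
	lazy_cp_switch(&b, 0);		/* B faults on CP0: A's state is saved first */
	printf("A's saved CP0 state: %d (expect 42)\n", a.cpregs[0]);
	return 0;
}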
#endif	/* !__ASSEMBLY__ */

#endif	/* _XTENSA_COPROCESSOR_H */
@@ -173,6 +173,21 @@ extern void xtensa_elf_core_copy_regs (xtensa_gregset_t *, struct pt_regs *);
	_r->areg[12]=0; _r->areg[13]=0; _r->areg[14]=0; _r->areg[15]=0; \
	} while (0)

typedef struct {
	xtregs_opt_t	opt;
	xtregs_user_t	user;
#if XTENSA_HAVE_COPROCESSORS
	xtregs_cp0_t	cp0;
	xtregs_cp1_t	cp1;
	xtregs_cp2_t	cp2;
	xtregs_cp3_t	cp3;
	xtregs_cp4_t	cp4;
	xtregs_cp5_t	cp5;
	xtregs_cp6_t	cp6;
	xtregs_cp7_t	cp7;
#endif
} elf_xtregs_t;

#define SET_PERSONALITY(ex, ibcs2) set_personality(PER_LINUX_32BIT)

struct task_struct;
@@ -103,10 +103,6 @@ struct thread_struct {
	unsigned long dbreaka[XCHAL_NUM_DBREAK];
	unsigned long dbreakc[XCHAL_NUM_DBREAK];

	/* Allocate storage for extra state and coprocessor state. */
	unsigned char cp_save[XTENSA_CP_EXTRA_SIZE]
		__attribute__ ((aligned(XTENSA_CP_EXTRA_ALIGN)));

	/* Make structure 16 bytes aligned. */
	int align[0] __attribute__ ((aligned(16)));
};

@@ -162,21 +158,16 @@ struct thread_struct {
struct task_struct;
struct mm_struct;

// FIXME: do we need release_thread for CP??
/* Free all resources held by a thread. */
#define release_thread(thread) do { } while(0)

// FIXME: do we need prepare_to_copy (lazy status) for CP??
/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)	do { } while (0)
extern void prepare_to_copy(struct task_struct*);
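Note (sketch only): the no-op prepare_to_copy() macro above becomes a real out-of-line function. Assuming its job is simply to unlazy live coprocessor contents into the parent's thread_info before the thread is duplicated (the exact body lives in the arch process code and may differ), it would look roughly like:

/* Assumed shape of the new hook; not copied from the patch. */
void prepare_to_copy(struct task_struct *tsk)
{
#if XTENSA_HAVE_COPROCESSORS
	coprocessor_flush_all(task_thread_info(tsk));
#endif
}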
/*
 * create a kernel thread without removing it from tasklists
 */
/* Create a kernel thread without removing it from tasklists */
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);

/* Copy and release all segment info associated with a VM */

#define copy_segments(p, mm)	do { } while(0)
#define release_segments(mm)	do { } while(0)
#define forget_segments()	do { } while (0)
@@ -53,33 +53,30 @@

/* Registers used by strace */

#define REG_A_BASE	0xfc000000
#define REG_AR_BASE	0x04000000
#define REG_PC		0x14000000
#define REG_PS		0x080000e6
#define REG_WB		0x08000048
#define REG_WS		0x08000049
#define REG_LBEG	0x08000000
#define REG_LEND	0x08000001
#define REG_LCOUNT	0x08000002
#define REG_SAR		0x08000003
#define REG_DEPC	0x080000c0
#define REG_EXCCAUSE	0x080000e8
#define REG_EXCVADDR	0x080000ee
#define SYSCALL_NR	0x1
#define REG_A_BASE	0x0000
#define REG_AR_BASE	0x0100
#define REG_PC		0x0020
#define REG_PS		0x02e6
#define REG_WB		0x0248
#define REG_WS		0x0249
#define REG_LBEG	0x0200
#define REG_LEND	0x0201
#define REG_LCOUNT	0x0202
#define REG_SAR		0x0203

#define AR_REGNO_TO_A_REGNO(ar, wb)  (ar - wb*4) & ~(XCHAL_NUM_AREGS - 1)
#define SYSCALL_NR	0x00ff

/* Other PTRACE_ values defined in <linux/ptrace.h> using values 0-9,16,17,24 */

#define PTRACE_GETREGS		12
#define PTRACE_SETREGS		13
#define PTRACE_GETFPREGS	14
#define PTRACE_SETFPREGS	15
#define PTRACE_GETFPREGSIZE	18
#define PTRACE_GETREGS		12
#define PTRACE_SETREGS		13
#define PTRACE_GETXTREGS	18
#define PTRACE_SETXTREGS	19
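Note (illustrative, not part of the patch): user space reaches the new requests through the ordinary ptrace(2) call; 'data' carries the destination buffer, exactly as in the arch_ptrace() cases shown earlier. A hedged sketch assuming an already-stopped tracee; XTREGS_BUF_SIZE is a placeholder, the real layout and size being the elf_xtregs_t for the configured core:

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_GETXTREGS
#define PTRACE_GETXTREGS 18		/* matches the definition above */
#endif

#define XTREGS_BUF_SIZE 1024		/* placeholder upper bound for this sketch */

static int get_xtregs(pid_t pid, void *buf)
{
	if (ptrace(PTRACE_GETXTREGS, pid, (void *)0, buf) < 0) {
		perror("PTRACE_GETXTREGS");
		return -1;
	}
	return 0;
}

int main(void)
{
	unsigned char xtregs[XTREGS_BUF_SIZE];
	pid_t pid = 0;			/* placeholder: pid of a stopped tracee */

	if (pid && get_xtregs(pid, xtregs) == 0)
		printf("read extended register area\n");
	return 0;
}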
#ifndef __ASSEMBLY__

#ifdef __KERNEL__

/*
 * This struct defines the way the registers are stored on the
 * kernel stack during a system call or other kernel entry.

@@ -102,6 +99,9 @@ struct pt_regs {
	unsigned long icountlevel;	/*  60 */
	int reserved[1];		/*  64 */

	/* Additional configurable registers that are used by the compiler. */
	xtregs_opt_t xtregs_opt;

	/* Make sure the areg field is 16 bytes aligned. */
	int align[0] __attribute__ ((aligned(16)));

@@ -111,8 +111,6 @@ struct pt_regs {
	unsigned long areg[16];		/* 128 (64) */
};

#ifdef __KERNEL__

#include <asm/variant/core.h>

# define task_pt_regs(tsk) ((struct pt_regs*) \
@@ -100,7 +100,14 @@
#define EXCCAUSE_DTLB_SIZE_RESTRICTION		27
#define EXCCAUSE_LOAD_CACHE_ATTRIBUTE		28
#define EXCCAUSE_STORE_CACHE_ATTRIBUTE		29
#define EXCCAUSE_FLOATING_POINT			40
#define EXCCAUSE_COPROCESSOR0_DISABLED		32
#define EXCCAUSE_COPROCESSOR1_DISABLED		33
#define EXCCAUSE_COPROCESSOR2_DISABLED		34
#define EXCCAUSE_COPROCESSOR3_DISABLED		35
#define EXCCAUSE_COPROCESSOR4_DISABLED		36
#define EXCCAUSE_COPROCESSOR5_DISABLED		37
#define EXCCAUSE_COPROCESSOR6_DISABLED		38
#define EXCCAUSE_COPROCESSOR7_DISABLED		39

/* PS register fields. */
@@ -22,6 +22,7 @@ struct sigcontext {
	unsigned long sc_acclo;
	unsigned long sc_acchi;
	unsigned long sc_a[16];
	void *sc_xtregs;
};

#endif /* _XTENSA_SIGCONTEXT_H */
@@ -46,42 +46,6 @@ static inline int irqs_disabled(void)
	return flags & 0xf;
}

#define RSR_CPENABLE(x)	do {						  \
	__asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \
	} while(0);
#define WSR_CPENABLE(x)	do {						  \
	__asm__ __volatile__("wsr %0," __stringify(CPENABLE)";rsync"	  \
			     :: "a" (x));} while(0);

#define clear_cpenable() __clear_cpenable()

static inline void __clear_cpenable(void)
{
#if XCHAL_HAVE_CP
	unsigned long i = 0;
	WSR_CPENABLE(i);
#endif
}

static inline void enable_coprocessor(int i)
{
#if XCHAL_HAVE_CP
	int cp;
	RSR_CPENABLE(cp);
	cp |= 1 << i;
	WSR_CPENABLE(cp);
#endif
}

static inline void disable_coprocessor(int i)
{
#if XCHAL_HAVE_CP
	int cp;
	RSR_CPENABLE(cp);
	cp &= ~(1 << i);
	WSR_CPENABLE(cp);
#endif
}

#define smp_read_barrier_depends() do { } while(0)
#define read_barrier_depends() do { } while(0)

@@ -111,7 +75,6 @@ extern void *_switch_to(void *last, void *next);

#define switch_to(prev,next,last)		\
do {						\
	clear_cpenable();			\
	(last) = _switch_to(prev, next);	\
} while(0)

@@ -244,7 +207,7 @@ static inline void spill_registers(void)
	"wsr	a13," __stringify(SAR) "\n\t"
	"wsr	a14," __stringify(PS) "\n\t"
	:: "a" (&a0), "a" (&ps)
	: "a2", "a3", "a12", "a13", "a14", "a15", "memory");
	: "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", "memory");
}

#define arch_align_stack(x) (x)
@@ -27,6 +27,21 @@

#ifndef __ASSEMBLY__

#if XTENSA_HAVE_COPROCESSORS

typedef struct xtregs_coprocessor {
	xtregs_cp0_t cp0;
	xtregs_cp1_t cp1;
	xtregs_cp2_t cp2;
	xtregs_cp3_t cp3;
	xtregs_cp4_t cp4;
	xtregs_cp5_t cp5;
	xtregs_cp6_t cp6;
	xtregs_cp7_t cp7;
} xtregs_coprocessor_t;

#endif

struct thread_info {
	struct task_struct	*task;		/* main task structure */
	struct exec_domain	*exec_domain;	/* execution domain */

@@ -38,7 +53,13 @@ struct thread_info {
	mm_segment_t		addr_limit;	/* thread address space */
	struct restart_block	restart_block;

	unsigned long		cpenable;

	/* Allocate storage for extra user states and coprocessor states. */
#if XTENSA_HAVE_COPROCESSORS
	xtregs_coprocessor_t	xtregs_cp;
#endif
	xtregs_user_t		xtregs_user;
};

#else /* !__ASSEMBLY__ */
include/asm-xtensa/variant-fsf/tie-asm.h (new file, 70 lines)

@@ -0,0 +1,70 @@
/*
 * This header file contains assembly-language definitions (assembly
 * macros, etc.) for this specific Xtensa processor's TIE extensions
 * and options.  It is customized to this Xtensa processor configuration.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2008 Tensilica Inc.
 */

#ifndef _XTENSA_CORE_TIE_ASM_H
#define _XTENSA_CORE_TIE_ASM_H

/*  Selection parameter values for save-area save/restore macros:  */
/*  Option vs. TIE:  */
#define XTHAL_SAS_TIE	0x0001	/* custom extension or coprocessor */
#define XTHAL_SAS_OPT	0x0002	/* optional (and not a coprocessor) */
/*  Whether used automatically by compiler:  */
#define XTHAL_SAS_NOCC	0x0004	/* not used by compiler w/o special opts/code */
#define XTHAL_SAS_CC	0x0008	/* used by compiler without special opts/code */
/*  ABI handling across function calls:  */
#define XTHAL_SAS_CALR	0x0010	/* caller-saved */
#define XTHAL_SAS_CALE	0x0020	/* callee-saved */
#define XTHAL_SAS_GLOB	0x0040	/* global across function calls (in thread) */
/*  Misc  */
#define XTHAL_SAS_ALL	0xFFFF	/* include all default NCP contents */


/* Macro to save all non-coprocessor (extra) custom TIE and optional state
 * (not including zero-overhead loop registers).
 * Save area ptr (clobbered):  ptr  (1 byte aligned)
 * Scratch regs  (clobbered):  at1..at4  (only first XCHAL_NCP_NUM_ATMPS needed)
 */
.macro xchal_ncp_store  ptr at1 at2 at3 at4  continue=0 ofs=-1 select=XTHAL_SAS_ALL
	xchal_sa_start	\continue, \ofs
	.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~\select
	xchal_sa_align	\ptr, 0, 1024-4, 4, 4
	rur	\at1, THREADPTR		// threadptr option
	s32i	\at1, \ptr, .Lxchal_ofs_ + 0
	.set	.Lxchal_ofs_, .Lxchal_ofs_ + 4
	.endif
.endm	// xchal_ncp_store

/* Macro to restore all non-coprocessor (extra) custom TIE and optional state
 * (not including zero-overhead loop registers).
 * Save area ptr (clobbered):  ptr  (1 byte aligned)
 * Scratch regs  (clobbered):  at1..at4  (only first XCHAL_NCP_NUM_ATMPS needed)
 */
.macro xchal_ncp_load  ptr at1 at2 at3 at4  continue=0 ofs=-1 select=XTHAL_SAS_ALL
	xchal_sa_start	\continue, \ofs
	.ifeq (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB) & ~\select
	xchal_sa_align	\ptr, 0, 1024-4, 4, 4
	l32i	\at1, \ptr, .Lxchal_ofs_ + 0
	wur	\at1, THREADPTR		// threadptr option
	.set	.Lxchal_ofs_, .Lxchal_ofs_ + 4
	.endif
.endm	// xchal_ncp_load
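Note (illustrative, not part of the patch): the .ifeq guards above store or reload the THREADPTR word only when the caller's select mask contains every one of its categories (OPT, CC and GLOB). A small C check showing why save_xtregs_opt in coprocessor.h (whose _SELECT includes XTHAL_SAS_CC) covers it while save_xtregs_user (XTHAL_SAS_NOCC instead) skips it:

#include <stdio.h>

#define XTHAL_SAS_TIE	0x0001
#define XTHAL_SAS_OPT	0x0002
#define XTHAL_SAS_NOCC	0x0004
#define XTHAL_SAS_CC	0x0008
#define XTHAL_SAS_CALR	0x0010
#define XTHAL_SAS_CALE	0x0020
#define XTHAL_SAS_GLOB	0x0040

/* Categories guarding the THREADPTR store/load in the macros above. */
#define ITEM	 (XTHAL_SAS_OPT | XTHAL_SAS_CC | XTHAL_SAS_GLOB)

/* Select masks used by save_xtregs_opt / save_xtregs_user in coprocessor.h. */
#define SEL_OPT	 (XTHAL_SAS_TIE | XTHAL_SAS_OPT | XTHAL_SAS_CC | \
		  XTHAL_SAS_CALR | XTHAL_SAS_CALE | XTHAL_SAS_GLOB)
#define SEL_USER (XTHAL_SAS_TIE | XTHAL_SAS_OPT | XTHAL_SAS_NOCC | \
		  XTHAL_SAS_CALR | XTHAL_SAS_CALE | XTHAL_SAS_GLOB)

int main(void)
{
	/* .ifeq assembles its body when the expression evaluates to zero. */
	printf("save_xtregs_opt stores THREADPTR:  %s\n",
	       (ITEM & ~SEL_OPT) == 0 ? "yes" : "no");
	printf("save_xtregs_user stores THREADPTR: %s\n",
	       (ITEM & ~SEL_USER) == 0 ? "yes" : "no");
	return 0;	/* prints "yes" then "no" */
}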
#define XCHAL_NCP_NUM_ATMPS	1

#define XCHAL_SA_NUM_ATMPS	1

#endif	/*_XTENSA_CORE_TIE_ASM_H*/
@@ -1,22 +1,77 @@
/*
 * Xtensa processor core configuration information.
 * This header file describes this specific Xtensa processor's TIE extensions
 * that extend basic Xtensa core functionality.  It is customized to this
 * Xtensa processor configuration.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Tensilica Inc.
 * Copyright (C) 1999-2007 Tensilica Inc.
 */

#ifndef XTENSA_TIE_H
#define XTENSA_TIE_H

/*----------------------------------------------------------------------
			COPROCESSORS and EXTRA STATE
  ----------------------------------------------------------------------*/
#ifndef _XTENSA_CORE_TIE_H
#define _XTENSA_CORE_TIE_H

#define XCHAL_CP_NUM			0	/* number of coprocessors */
#define XCHAL_CP_MASK			0x00
#define XCHAL_CP_MAX			0	/* max CP ID + 1 (0 if none) */
#define XCHAL_CP_MASK			0x00	/* bitmask of all CPs by ID */
#define XCHAL_CP_PORT_MASK		0x00	/* bitmask of only port CPs */

#endif /*XTENSA_CONFIG_TIE_H*/
/*  Basic parameters of each coprocessor:  */
#define XCHAL_CP7_NAME			"XTIOP"
#define XCHAL_CP7_IDENT			XTIOP
#define XCHAL_CP7_SA_SIZE		0	/* size of state save area */
#define XCHAL_CP7_SA_ALIGN		1	/* min alignment of save area */
#define XCHAL_CP_ID_XTIOP		7	/* coprocessor ID (0..7) */

/*  Filler info for unassigned coprocessors, to simplify arrays etc:  */
#define XCHAL_NCP_SA_SIZE		0
#define XCHAL_NCP_SA_ALIGN		1
#define XCHAL_CP0_SA_SIZE		0
#define XCHAL_CP0_SA_ALIGN		1
#define XCHAL_CP1_SA_SIZE		0
#define XCHAL_CP1_SA_ALIGN		1
#define XCHAL_CP2_SA_SIZE		0
#define XCHAL_CP2_SA_ALIGN		1
#define XCHAL_CP3_SA_SIZE		0
#define XCHAL_CP3_SA_ALIGN		1
#define XCHAL_CP4_SA_SIZE		0
#define XCHAL_CP4_SA_ALIGN		1
#define XCHAL_CP5_SA_SIZE		0
#define XCHAL_CP5_SA_ALIGN		1
#define XCHAL_CP6_SA_SIZE		0
#define XCHAL_CP6_SA_ALIGN		1

/*  Save area for non-coprocessor optional and custom (TIE) state:  */
#define XCHAL_NCP_SA_SIZE		0
#define XCHAL_NCP_SA_ALIGN		1

/*  Total save area for optional and custom state (NCP + CPn):  */
#define XCHAL_TOTAL_SA_SIZE		0	/* with 16-byte align padding */
#define XCHAL_TOTAL_SA_ALIGN		1	/* actual minimum alignment */

#define XCHAL_NCP_SA_NUM	0
#define XCHAL_NCP_SA_LIST(s)
#define XCHAL_CP0_SA_NUM	0
#define XCHAL_CP0_SA_LIST(s)
#define XCHAL_CP1_SA_NUM	0
#define XCHAL_CP1_SA_LIST(s)
#define XCHAL_CP2_SA_NUM	0
#define XCHAL_CP2_SA_LIST(s)
#define XCHAL_CP3_SA_NUM	0
#define XCHAL_CP3_SA_LIST(s)
#define XCHAL_CP4_SA_NUM	0
#define XCHAL_CP4_SA_LIST(s)
#define XCHAL_CP5_SA_NUM	0
#define XCHAL_CP5_SA_LIST(s)
#define XCHAL_CP6_SA_NUM	0
#define XCHAL_CP6_SA_LIST(s)
#define XCHAL_CP7_SA_NUM	0
#define XCHAL_CP7_SA_LIST(s)

/*  Byte length of instruction from its first nibble (op0 field), per FLIX.  */
#define XCHAL_OP0_FORMAT_LENGTHS	3,3,3,3,3,3,3,3,2,2,2,2,2,2,3,3

#endif /*_XTENSA_CORE_TIE_H*/