Merge branch 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 apic updates from Thomas Gleixner:
 "This update provides:

   - Cleanup of the IDT management, including the removal of the extra
     tracing IDT. A first step to clean up the vector management code.

   - The removal of the paravirt op adjust_exception_frame. This is a
     XEN specific issue, but merged through this branch to avoid nasty
     merge collisions.

   - Prevent dmesg spam about the TSC DEADLINE bug when the CPU has
     disabled the TSC DEADLINE timer in CPUID.

   - Adjust a debug message in the ioapic code to print out the
     information correctly"

* 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (51 commits)
  x86/idt: Fix the X86_TRAP_BP gate
  x86/xen: Get rid of paravirt op adjust_exception_frame
  x86/eisa: Add missing include
  x86/idt: Remove superfluous ALIGNment
  x86/apic: Silence "FW_BUG TSC_DEADLINE disabled due to Errata" on CPUs without the feature
  x86/idt: Remove the tracing IDT leftovers
  x86/idt: Hide set_intr_gate()
  x86/idt: Simplify alloc_intr_gate()
  x86/idt: Deinline setup functions
  x86/idt: Remove unused functions/inlines
  x86/idt: Move interrupt gate initialization to IDT code
  x86/idt: Move APIC gate initialization to tables
  x86/idt: Move regular trap init to tables
  x86/idt: Move IST stack based traps to table init
  x86/idt: Move debug stack init to table based
  x86/idt: Switch early trap init to IDT tables
  x86/idt: Prepare for table based init
  x86/idt: Move early IDT setup out of 32-bit asm
  x86/idt: Move early IDT handler setup to IDT code
  x86/idt: Consolidate IDT invalidation
  ...
commit 24e700e291
58 changed files with 908 additions and 1084 deletions
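Note: most of the mechanical churn below follows from one structural change — struct desc_struct loses its old anonymous a/b word accessors and becomes a plain bitfield struct, with the 4-bit high limit field renamed from 'limit' to 'limit1'. As a quick sanity check of that layout (a standalone userspace sketch, not part of the commit; only the field names mirror the kernel's):

#include <assert.h>
#include <stdint.h>

/* Models the new desc_struct bitfield layout from desc_defs.h below. */
struct desc_struct {
	uint16_t limit0;
	uint16_t base0;
	uint16_t base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
	uint16_t limit1 : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
} __attribute__((packed));

int main(void)
{
	struct desc_struct d = { 0 };
	unsigned long limit = 0xfffff;	/* 20-bit limit; 4G with g=1 */

	/* Split the limit exactly as set_desc_limit() does */
	d.limit0 = limit & 0xffff;
	d.limit1 = (limit >> 16) & 0xf;

	_Static_assert(sizeof(struct desc_struct) == 8,
		       "segment descriptors must stay 8 bytes");
	/* get_desc_limit() reassembles the two pieces */
	assert((d.limit0 | ((unsigned long)d.limit1 << 16)) == limit);
	return 0;
}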
@@ -1058,7 +1058,7 @@ struct boot_params *efi_main(struct efi_config *c,
 	desc->s = DESC_TYPE_CODE_DATA;
 	desc->dpl = 0;
 	desc->p = 1;
-	desc->limit = 0xf;
+	desc->limit1 = 0xf;
 	desc->avl = 0;
 	desc->l = 0;
 	desc->d = SEG_OP_SIZE_32BIT;
@@ -1078,7 +1078,7 @@ struct boot_params *efi_main(struct efi_config *c,
 	desc->s = DESC_TYPE_CODE_DATA;
 	desc->dpl = 0;
 	desc->p = 1;
-	desc->limit = 0xf;
+	desc->limit1 = 0xf;
 	desc->avl = 0;
 	if (IS_ENABLED(CONFIG_X86_64)) {
 		desc->l = 1;
@@ -1099,7 +1099,7 @@ struct boot_params *efi_main(struct efi_config *c,
 	desc->s = DESC_TYPE_CODE_DATA;
 	desc->dpl = 0;
 	desc->p = 1;
-	desc->limit = 0xf;
+	desc->limit1 = 0xf;
 	desc->avl = 0;
 	desc->l = 0;
 	desc->d = SEG_OP_SIZE_32BIT;
@@ -1116,7 +1116,7 @@ struct boot_params *efi_main(struct efi_config *c,
 	desc->s = 0;
 	desc->dpl = 0;
 	desc->p = 1;
-	desc->limit = 0x0;
+	desc->limit1 = 0x0;
 	desc->avl = 0;
 	desc->l = 0;
 	desc->d = 0;

@@ -673,16 +673,8 @@ ENTRY(name)						\
 	jmp	ret_from_intr;					\
 ENDPROC(name)

-#ifdef CONFIG_TRACING
-# define TRACE_BUILD_INTERRUPT(name, nr) BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
-#else
-# define TRACE_BUILD_INTERRUPT(name, nr)
-#endif
-
 #define BUILD_INTERRUPT(name, nr)		\
-	BUILD_INTERRUPT3(name, nr, smp_##name);	\
-	TRACE_BUILD_INTERRUPT(name, nr)
+	BUILD_INTERRUPT3(name, nr, smp_##name)

 /* The include is where all of the SMP etc. interrupts come from */
 #include <asm/entry_arch.h>
@@ -880,25 +872,17 @@ ENTRY(xen_failsafe_callback)
 ENDPROC(xen_failsafe_callback)

 BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		  xen_evtchn_do_upcall)

 #endif /* CONFIG_XEN */

 #if IS_ENABLED(CONFIG_HYPERV)

 BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
		  hyperv_vector_handler)

 #endif /* CONFIG_HYPERV */

-#ifdef CONFIG_TRACING
-ENTRY(trace_page_fault)
-	ASM_CLAC
-	pushl	$trace_do_page_fault
-	jmp	common_exception
-END(trace_page_fault)
-#endif
-
 ENTRY(page_fault)
 	ASM_CLAC
 	pushl	$do_page_fault

@@ -748,18 +748,6 @@ ENTRY(\sym)
 END(\sym)
 .endm

-#ifdef CONFIG_TRACING
-#define trace(sym) trace_##sym
-#define smp_trace(sym) smp_trace_##sym
-
-.macro trace_apicinterrupt num sym
-apicinterrupt3 \num trace(\sym) smp_trace(\sym)
-.endm
-#else
-.macro trace_apicinterrupt num sym do_sym
-.endm
-#endif
-
 /* Make sure APIC interrupt handlers end up in the irqentry section: */
 #define PUSH_SECTION_IRQENTRY	.pushsection .irqentry.text, "ax"
 #define POP_SECTION_IRQENTRY	.popsection
@@ -767,7 +755,6 @@ apicinterrupt3 \num trace(\sym) smp_trace(\sym)
 .macro apicinterrupt num sym do_sym
 PUSH_SECTION_IRQENTRY
 apicinterrupt3 \num \sym \do_sym
-trace_apicinterrupt \num \sym
 POP_SECTION_IRQENTRY
 .endm
@@ -829,7 +816,6 @@ ENTRY(\sym)
 	.endif

 	ASM_CLAC
-	PARAVIRT_ADJUST_EXCEPTION_FRAME

 	.ifeq \has_error_code
 	pushq	$-1			/* ORIG_RAX: no syscall to restart */
@@ -913,17 +899,6 @@ ENTRY(\sym)
 END(\sym)
 .endm

-#ifdef CONFIG_TRACING
-.macro trace_idtentry sym do_sym has_error_code:req
-idtentry trace(\sym) trace(\do_sym) has_error_code=\has_error_code
-idtentry \sym \do_sym has_error_code=\has_error_code
-.endm
-#else
-.macro trace_idtentry sym do_sym has_error_code:req
-idtentry \sym \do_sym has_error_code=\has_error_code
-.endm
-#endif
-
 idtentry divide_error			do_divide_error			has_error_code=0
 idtentry overflow			do_overflow			has_error_code=0
 idtentry bounds				do_bounds			has_error_code=0
@@ -986,7 +961,7 @@ ENTRY(do_softirq_own_stack)
 ENDPROC(do_softirq_own_stack)

 #ifdef CONFIG_XEN
-idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
+idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0

 /*
  * A note on the "critical region" in our callback handler.
@@ -1053,8 +1028,6 @@ ENTRY(xen_failsafe_callback)
 	movq	8(%rsp), %r11
 	addq	$0x30, %rsp
 	pushq	$0				/* RIP */
-	pushq	%r11
-	pushq	%rcx
 	UNWIND_HINT_IRET_REGS offset=8
 	jmp	general_protection
 1:	/* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
@@ -1085,13 +1058,12 @@ idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
 idtentry stack_segment		do_stack_segment	has_error_code=1

 #ifdef CONFIG_XEN
-idtentry xen_debug		do_debug		has_error_code=0
-idtentry xen_int3		do_int3			has_error_code=0
-idtentry xen_stack_segment	do_stack_segment	has_error_code=1
+idtentry xendebug		do_debug		has_error_code=0
+idtentry xenint3		do_int3			has_error_code=0
 #endif

 idtentry general_protection	do_general_protection	has_error_code=1
-trace_idtentry page_fault	do_page_fault		has_error_code=1
+idtentry page_fault		do_page_fault		has_error_code=1

 #ifdef CONFIG_KVM_GUEST
 idtentry async_page_fault	do_async_page_fault	has_error_code=1
@@ -1251,20 +1223,9 @@ ENTRY(error_exit)
 END(error_exit)

 /* Runs on exception stack */
+/* XXX: broken on Xen PV */
 ENTRY(nmi)
 	UNWIND_HINT_IRET_REGS
-	/*
-	 * Fix up the exception frame if we're on Xen.
-	 * PARAVIRT_ADJUST_EXCEPTION_FRAME is guaranteed to push at most
-	 * one value to the stack on native, so it may clobber the rdx
-	 * scratch slot, but it won't clobber any of the important
-	 * slots past it.
-	 *
-	 * Xen is a different story, because the Xen frame itself overlaps
-	 * the "NMI executing" variable.
-	 */
-	PARAVIRT_ADJUST_EXCEPTION_FRAME
-
 	/*
 	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
 	 * the iretq it performs will take us out of NMI context.

@@ -293,7 +293,6 @@ ENTRY(entry_INT80_compat)
 	/*
	 * Interrupts are off on entry.
	 */
-	PARAVIRT_ADJUST_EXCEPTION_FRAME
	ASM_CLAC			/* Do this early to minimize exposure */
	SWAPGS

@@ -351,7 +351,7 @@ static void vgetcpu_cpu_init(void *arg)
	 * and 8 bits for the node)
	 */
	d.limit0 = cpu | ((node & 0xf) << 12);
-	d.limit = node >> 4;
+	d.limit1 = node >> 4;
	d.type = 5;		/* RO data, expand down, accessed */
	d.dpl = 3;		/* Visible to user code */
	d.s = 1;		/* Not a system segment */

@@ -5,6 +5,7 @@
 #include <asm/ldt.h>
 #include <asm/mmu.h>
 #include <asm/fixmap.h>
+#include <asm/irq_vectors.h>

 #include <linux/smp.h>
 #include <linux/percpu.h>
@@ -22,7 +23,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
	desc->s			= 1;
	desc->dpl		= 0x3;
	desc->p			= info->seg_not_present ^ 1;
-	desc->limit		= (info->limit & 0xf0000) >> 16;
+	desc->limit1		= (info->limit & 0xf0000) >> 16;
	desc->avl		= info->useable;
	desc->d			= info->seg_32bit;
	desc->g			= info->limit_in_pages;
@@ -83,33 +84,25 @@ static inline phys_addr_t get_cpu_gdt_paddr(unsigned int cpu)
	return per_cpu_ptr_to_phys(get_cpu_gdt_rw(cpu));
 }

-#ifdef CONFIG_X86_64
-
 static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
			     unsigned dpl, unsigned ist, unsigned seg)
 {
-	gate->offset_low	= PTR_LOW(func);
+	gate->offset_low	= (u16) func;
+	gate->bits.p		= 1;
+	gate->bits.dpl		= dpl;
+	gate->bits.zero		= 0;
+	gate->bits.type		= type;
+	gate->offset_middle	= (u16) (func >> 16);
+#ifdef CONFIG_X86_64
	gate->segment		= __KERNEL_CS;
-	gate->ist		= ist;
-	gate->p			= 1;
-	gate->dpl		= dpl;
-	gate->zero0		= 0;
-	gate->zero1		= 0;
-	gate->type		= type;
-	gate->offset_middle	= PTR_MIDDLE(func);
-	gate->offset_high	= PTR_HIGH(func);
-}
-
+	gate->bits.ist		= ist;
+	gate->reserved		= 0;
+	gate->offset_high	= (u32) (func >> 32);
 #else
-static inline void pack_gate(gate_desc *gate, unsigned char type,
-			     unsigned long base, unsigned dpl, unsigned flags,
-			     unsigned short seg)
-{
-	gate->a = (seg << 16) | (base & 0xffff);
-	gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
-}
-
+	gate->segment		= seg;
+	gate->bits.ist		= 0;
 #endif
+}

 static inline int desc_empty(const void *ptr)
 {
@@ -173,35 +166,22 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type)
	memcpy(&gdt[entry], desc, size);
 }

-static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
-				   unsigned long limit, unsigned char type,
-				   unsigned char flags)
-{
-	desc->a = ((base & 0xffff) << 16) | (limit & 0xffff);
-	desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
-		  (limit & 0x000f0000) | ((type & 0xff) << 8) |
-		  ((flags & 0xf) << 20);
-	desc->p = 1;
-}
-
-
-static inline void set_tssldt_descriptor(void *d, unsigned long addr, unsigned type, unsigned size)
+static inline void set_tssldt_descriptor(void *d, unsigned long addr,
+					 unsigned type, unsigned size)
 {
-#ifdef CONFIG_X86_64
-	struct ldttss_desc64 *desc = d;
+	struct ldttss_desc *desc = d;

	memset(desc, 0, sizeof(*desc));

-	desc->limit0		= size & 0xFFFF;
-	desc->base0		= PTR_LOW(addr);
-	desc->base1		= PTR_MIDDLE(addr) & 0xFF;
+	desc->limit0		= (u16) size;
+	desc->base0		= (u16) addr;
+	desc->base1		= (addr >> 16) & 0xFF;
	desc->type		= type;
	desc->p			= 1;
	desc->limit1		= (size >> 16) & 0xF;
-	desc->base2		= (PTR_MIDDLE(addr) >> 8) & 0xFF;
-	desc->base3		= PTR_HIGH(addr);
-#else
-	pack_descriptor((struct desc_struct *)d, addr, size, 0x80 | type, 0);
+	desc->base2		= (addr >> 24) & 0xFF;
+#ifdef CONFIG_X86_64
+	desc->base3		= (u32) (addr >> 32);
 #endif
 }

@@ -401,147 +381,20 @@ static inline void set_desc_base(struct desc_struct *desc, unsigned long base)

 static inline unsigned long get_desc_limit(const struct desc_struct *desc)
 {
-	return desc->limit0 | (desc->limit << 16);
+	return desc->limit0 | (desc->limit1 << 16);
 }

 static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
 {
	desc->limit0 = limit & 0xffff;
-	desc->limit = (limit >> 16) & 0xf;
+	desc->limit1 = (limit >> 16) & 0xf;
 }

-#ifdef CONFIG_X86_64
-static inline void set_nmi_gate(int gate, void *addr)
-{
-	gate_desc s;
-
-	pack_gate(&s, GATE_INTERRUPT, (unsigned long)addr, 0, 0, __KERNEL_CS);
-	write_idt_entry(debug_idt_table, gate, &s);
-}
-#endif
-
-#ifdef CONFIG_TRACING
-extern struct desc_ptr trace_idt_descr;
-extern gate_desc trace_idt_table[];
-static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
-{
-	write_idt_entry(trace_idt_table, entry, gate);
-}
-
-static inline void _trace_set_gate(int gate, unsigned type, void *addr,
-				   unsigned dpl, unsigned ist, unsigned seg)
-{
-	gate_desc s;
-
-	pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
-	/*
-	 * does not need to be atomic because it is only done once at
-	 * setup time
-	 */
-	write_trace_idt_entry(gate, &s);
-}
-#else
-static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
-{
-}
-
-#define _trace_set_gate(gate, type, addr, dpl, ist, seg)
-#endif
-
-static inline void _set_gate(int gate, unsigned type, void *addr,
-			     unsigned dpl, unsigned ist, unsigned seg)
-{
-	gate_desc s;
-
-	pack_gate(&s, type, (unsigned long)addr, dpl, ist, seg);
-	/*
-	 * does not need to be atomic because it is only done once at
-	 * setup time
-	 */
-	write_idt_entry(idt_table, gate, &s);
-	write_trace_idt_entry(gate, &s);
-}
-
-/*
- * This needs to use 'idt_table' rather than 'idt', and
- * thus use the _nonmapped_ version of the IDT, as the
- * Pentium F0 0F bugfix can have resulted in the mapped
- * IDT being write-protected.
- */
-#define set_intr_gate_notrace(n, addr)					\
-	do {								\
-		BUG_ON((unsigned)n > 0xFF);				\
-		_set_gate(n, GATE_INTERRUPT, (void *)addr, 0, 0,	\
-			  __KERNEL_CS);					\
-	} while (0)
-
-#define set_intr_gate(n, addr)						\
-	do {								\
-		set_intr_gate_notrace(n, addr);				\
-		_trace_set_gate(n, GATE_INTERRUPT, (void *)trace_##addr,\
-				0, 0, __KERNEL_CS);			\
-	} while (0)
-
-extern int first_system_vector;
-/* used_vectors is BITMAP for irq is not managed by percpu vector_irq */
-extern unsigned long used_vectors[];
-
-static inline void alloc_system_vector(int vector)
-{
-	if (!test_bit(vector, used_vectors)) {
-		set_bit(vector, used_vectors);
-		if (first_system_vector > vector)
-			first_system_vector = vector;
-	} else {
-		BUG();
-	}
-}
-
-#define alloc_intr_gate(n, addr)				\
-	do {							\
-		alloc_system_vector(n);				\
-		set_intr_gate(n, addr);				\
-	} while (0)
-
-/*
- * This routine sets up an interrupt gate at directory privilege level 3.
- */
-static inline void set_system_intr_gate(unsigned int n, void *addr)
-{
-	BUG_ON((unsigned)n > 0xFF);
-	_set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
-}
-
-static inline void set_system_trap_gate(unsigned int n, void *addr)
-{
-	BUG_ON((unsigned)n > 0xFF);
-	_set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
-}
-
-static inline void set_trap_gate(unsigned int n, void *addr)
-{
-	BUG_ON((unsigned)n > 0xFF);
-	_set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
-}
-
-static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
-{
-	BUG_ON((unsigned)n > 0xFF);
-	_set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
-}
-
-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
-{
-	BUG_ON((unsigned)n > 0xFF);
-	_set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
-}
-
-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
-{
-	BUG_ON((unsigned)n > 0xFF);
-	_set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
-}
+void update_intr_gate(unsigned int n, const void *addr);
+void alloc_intr_gate(unsigned int n, const void *addr);
+
+extern unsigned long used_vectors[];

 #ifdef CONFIG_X86_64
 DECLARE_PER_CPU(u32, debug_idt_ctr);
 static inline bool is_debug_idt_enabled(void)
@@ -567,31 +420,6 @@ static inline void load_debug_idt(void)
 }
 #endif

-#ifdef CONFIG_TRACING
-extern atomic_t trace_idt_ctr;
-static inline bool is_trace_idt_enabled(void)
-{
-	if (atomic_read(&trace_idt_ctr))
-		return true;
-
-	return false;
-}
-
-static inline void load_trace_idt(void)
-{
-	load_idt((const struct desc_ptr *)&trace_idt_descr);
-}
-#else
-static inline bool is_trace_idt_enabled(void)
-{
-	return false;
-}
-
-static inline void load_trace_idt(void)
-{
-}
-#endif
-
 /*
  * The load_current_idt() must be called with interrupts disabled
  * to avoid races. That way the IDT will always be set back to the expected
@@ -603,9 +431,25 @@ static inline void load_current_idt(void)
 {
	if (is_debug_idt_enabled())
		load_debug_idt();
-	else if (is_trace_idt_enabled())
-		load_trace_idt();
	else
		load_idt((const struct desc_ptr *)&idt_descr);
 }

+extern void idt_setup_early_handler(void);
+extern void idt_setup_early_traps(void);
+extern void idt_setup_traps(void);
+extern void idt_setup_apic_and_irq_gates(void);
+
+#ifdef CONFIG_X86_64
+extern void idt_setup_early_pf(void);
+extern void idt_setup_ist_traps(void);
+extern void idt_setup_debugidt_traps(void);
+#else
+static inline void idt_setup_early_pf(void) { }
+static inline void idt_setup_ist_traps(void) { }
+static inline void idt_setup_debugidt_traps(void) { }
+#endif
+
+extern void idt_invalidate(void *addr);
+
 #endif /* _ASM_X86_DESC_H */

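The set_intr_gate()/alloc_intr_gate() macros above become the two out-of-line declarations; their bodies move into the new arch/x86/kernel/idt.c (whose listing is truncated at the end of this page). A minimal sketch of what the new allocator does, kernel context assumed and mirroring the semantics of the removed alloc_system_vector():

void alloc_intr_gate(unsigned int n, const void *addr)
{
	/* System vectors only, and each vector may be allocated once. */
	BUG_ON(test_bit(n, used_vectors) || n < FIRST_SYSTEM_VECTOR);
	set_bit(n, used_vectors);
	set_intr_gate(n, addr);		/* now private to idt.c */
}

Making this a real function is what lets set_intr_gate() be hidden from the rest of the kernel.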
@@ -11,34 +11,30 @@

 #include <linux/types.h>

-/*
- * FIXME: Accessing the desc_struct through its fields is more elegant,
- * and should be the one valid thing to do. However, a lot of open code
- * still touches the a and b accessors, and doing this allow us to do it
- * incrementally. We keep the signature as a struct, rather than a union,
- * so we can get rid of it transparently in the future -- glommer
- */
 /* 8 byte segment descriptor */
 struct desc_struct {
-	union {
-		struct {
-			unsigned int a;
-			unsigned int b;
-		};
-		struct {
-			u16 limit0;
-			u16 base0;
-			unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
-			unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
-		};
-	};
+	u16	limit0;
+	u16	base0;
+	u16	base1: 8, type: 4, s: 1, dpl: 2, p: 1;
+	u16	limit1: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
 } __attribute__((packed));

-#define GDT_ENTRY_INIT(flags, base, limit) { { { \
-		.a = ((limit) & 0xffff) | (((base) & 0xffff) << 16), \
-		.b = (((base) & 0xff0000) >> 16) | (((flags) & 0xf0ff) << 8) | \
-			((limit) & 0xf0000) | ((base) & 0xff000000), \
-	} } }
+#define GDT_ENTRY_INIT(flags, base, limit)			\
+	{							\
+		.limit0		= (u16) (limit),		\
+		.limit1		= ((limit) >> 16) & 0x0F,	\
+		.base0		= (u16) (base),			\
+		.base1		= ((base) >> 16) & 0xFF,	\
+		.base2		= ((base) >> 24) & 0xFF,	\
+		.type		= (flags & 0x0f),		\
+		.s		= (flags >> 4) & 0x01,		\
+		.dpl		= (flags >> 5) & 0x03,		\
+		.p		= (flags >> 7) & 0x01,		\
+		.avl		= (flags >> 12) & 0x01,		\
+		.l		= (flags >> 13) & 0x01,		\
+		.d		= (flags >> 14) & 0x01,		\
+		.g		= (flags >> 15) & 0x01,		\
+	}

 enum {
	GATE_INTERRUPT = 0xE,
@@ -47,49 +43,63 @@ enum {
	GATE_TASK = 0x5,
 };

-/* 16byte gate */
-struct gate_struct64 {
-	u16 offset_low;
-	u16 segment;
-	unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
-	u16 offset_middle;
-	u32 offset_high;
-	u32 zero1;
-} __attribute__((packed));
-
-#define PTR_LOW(x) ((unsigned long long)(x) & 0xFFFF)
-#define PTR_MIDDLE(x) (((unsigned long long)(x) >> 16) & 0xFFFF)
-#define PTR_HIGH(x) ((unsigned long long)(x) >> 32)
-
 enum {
	DESC_TSS = 0x9,
	DESC_LDT = 0x2,
	DESCTYPE_S = 0x10,	/* !system */
 };

-/* LDT or TSS descriptor in the GDT. 16 bytes. */
-struct ldttss_desc64 {
-	u16 limit0;
-	u16 base0;
-	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
-	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
-	u32 base3;
-	u32 zero1;
+/* LDT or TSS descriptor in the GDT. */
+struct ldttss_desc {
+	u16	limit0;
+	u16	base0;
+
+	u16	base1 : 8, type : 5, dpl : 2, p : 1;
+	u16	limit1 : 4, zero0 : 3, g : 1, base2 : 8;
+#ifdef CONFIG_X86_64
+	u32	base3;
+	u32	zero1;
+#endif
 } __attribute__((packed));

+typedef struct ldttss_desc ldt_desc;
+typedef struct ldttss_desc tss_desc;
+
+struct idt_bits {
+	u16		ist	: 3,
+			zero	: 5,
+			type	: 5,
+			dpl	: 2,
+			p	: 1;
+} __attribute__((packed));
+
+struct gate_struct {
+	u16		offset_low;
+	u16		segment;
+	struct idt_bits	bits;
+	u16		offset_middle;
 #ifdef CONFIG_X86_64
-typedef struct gate_struct64 gate_desc;
-typedef struct ldttss_desc64 ldt_desc;
-typedef struct ldttss_desc64 tss_desc;
-#define gate_offset(g) ((g).offset_low | ((unsigned long)(g).offset_middle << 16) | ((unsigned long)(g).offset_high << 32))
-#define gate_segment(g) ((g).segment)
-#else
-typedef struct desc_struct gate_desc;
-typedef struct desc_struct ldt_desc;
-typedef struct desc_struct tss_desc;
-#define gate_offset(g) (((g).b & 0xffff0000) | ((g).a & 0x0000ffff))
-#define gate_segment(g) ((g).a >> 16)
+	u32		offset_high;
+	u32		reserved;
 #endif
+} __attribute__((packed));
+
+typedef struct gate_struct gate_desc;
+
+static inline unsigned long gate_offset(const gate_desc *g)
+{
+#ifdef CONFIG_X86_64
+	return g->offset_low | ((unsigned long) g->offset_middle << 16) |
+		((unsigned long) g->offset_high << 32);
+#else
+	return g->offset_low | ((unsigned long)g->offset_middle << 16);
+#endif
+}
+
+static inline unsigned long gate_segment(const gate_desc *g)
+{
+	return g->segment;
+}

 struct desc_ptr {
	unsigned short size;

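The shared struct gate_struct packs a handler address into offset_low/offset_middle/offset_high exactly as the new pack_gate() does. A standalone round-trip check of that split (userspace sketch, not from the commit):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t func = 0xffffffff81234567ULL;	/* arbitrary handler address */
	uint16_t offset_low    = (uint16_t) func;
	uint16_t offset_middle = (uint16_t) (func >> 16);
	uint32_t offset_high   = (uint32_t) (func >> 32);

	/* gate_offset() reassembles the three pieces */
	uint64_t off = offset_low | ((uint64_t) offset_middle << 16) |
		       ((uint64_t) offset_high << 32);

	assert(off == func);
	return 0;
}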
@@ -13,20 +13,14 @@
 BUILD_INTERRUPT(reschedule_interrupt,RESCHEDULE_VECTOR)
 BUILD_INTERRUPT(call_function_interrupt,CALL_FUNCTION_VECTOR)
 BUILD_INTERRUPT(call_function_single_interrupt,CALL_FUNCTION_SINGLE_VECTOR)
-BUILD_INTERRUPT3(irq_move_cleanup_interrupt, IRQ_MOVE_CLEANUP_VECTOR,
-		 smp_irq_move_cleanup_interrupt)
-BUILD_INTERRUPT3(reboot_interrupt, REBOOT_VECTOR, smp_reboot_interrupt)
+BUILD_INTERRUPT(irq_move_cleanup_interrupt, IRQ_MOVE_CLEANUP_VECTOR)
+BUILD_INTERRUPT(reboot_interrupt, REBOOT_VECTOR)
 #endif

-BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
-
 #ifdef CONFIG_HAVE_KVM
-BUILD_INTERRUPT3(kvm_posted_intr_ipi, POSTED_INTR_VECTOR,
-		 smp_kvm_posted_intr_ipi)
-BUILD_INTERRUPT3(kvm_posted_intr_wakeup_ipi, POSTED_INTR_WAKEUP_VECTOR,
-		 smp_kvm_posted_intr_wakeup_ipi)
-BUILD_INTERRUPT3(kvm_posted_intr_nested_ipi, POSTED_INTR_NESTED_VECTOR,
-		 smp_kvm_posted_intr_nested_ipi)
+BUILD_INTERRUPT(kvm_posted_intr_ipi, POSTED_INTR_VECTOR)
+BUILD_INTERRUPT(kvm_posted_intr_wakeup_ipi, POSTED_INTR_WAKEUP_VECTOR)
+BUILD_INTERRUPT(kvm_posted_intr_nested_ipi, POSTED_INTR_NESTED_VECTOR)
 #endif

 /*
@@ -41,6 +35,7 @@ BUILD_INTERRUPT3(kvm_posted_intr_nested_ipi, POSTED_INTR_NESTED_VECTOR,
 BUILD_INTERRUPT(apic_timer_interrupt,LOCAL_TIMER_VECTOR)
 BUILD_INTERRUPT(error_interrupt,ERROR_APIC_VECTOR)
 BUILD_INTERRUPT(spurious_interrupt,SPURIOUS_APIC_VECTOR)
+BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)

 #ifdef CONFIG_IRQ_WORK
 BUILD_INTERRUPT(irq_work_interrupt, IRQ_WORK_VECTOR)

@@ -46,26 +46,6 @@ extern asmlinkage void deferred_error_interrupt(void);
 extern asmlinkage void call_function_interrupt(void);
 extern asmlinkage void call_function_single_interrupt(void);

-#ifdef CONFIG_TRACING
-/* Interrupt handlers registered during init_IRQ */
-extern void trace_apic_timer_interrupt(void);
-extern void trace_x86_platform_ipi(void);
-extern void trace_error_interrupt(void);
-extern void trace_irq_work_interrupt(void);
-extern void trace_spurious_interrupt(void);
-extern void trace_thermal_interrupt(void);
-extern void trace_reschedule_interrupt(void);
-extern void trace_threshold_interrupt(void);
-extern void trace_deferred_error_interrupt(void);
-extern void trace_call_function_interrupt(void);
-extern void trace_call_function_single_interrupt(void);
-#define trace_irq_move_cleanup_interrupt  irq_move_cleanup_interrupt
-#define trace_reboot_interrupt  reboot_interrupt
-#define trace_kvm_posted_intr_ipi kvm_posted_intr_ipi
-#define trace_kvm_posted_intr_wakeup_ipi kvm_posted_intr_wakeup_ipi
-#define trace_kvm_posted_intr_nested_ipi kvm_posted_intr_nested_ipi
-#endif /* CONFIG_TRACING */
-
 #ifdef	CONFIG_X86_LOCAL_APIC
 struct irq_data;
 struct pci_dev;

@@ -42,10 +42,6 @@ extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);

 extern __visible unsigned int do_IRQ(struct pt_regs *regs);

-/* Interrupt vector management */
-extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
-
 extern int vector_used_by_percpu_irq(unsigned int vector);

 extern void init_ISA_irqs(void);

 #ifdef CONFIG_X86_LOCAL_APIC

@@ -3,9 +3,17 @@

 #include <asm/cpufeature.h>

+#ifdef CONFIG_X86_LOCAL_APIC
 static inline bool arch_irq_work_has_interrupt(void)
 {
	return boot_cpu_has(X86_FEATURE_APIC);
 }
+extern void arch_irq_work_raise(void);
+#else
+static inline bool arch_irq_work_has_interrupt(void)
+{
+	return false;
+}
+#endif

 #endif /* _ASM_IRQ_WORK_H */

@@ -960,11 +960,6 @@ extern void default_banner(void);
 #define GET_CR2_INTO_RAX				\
	call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr2)

-#define PARAVIRT_ADJUST_EXCEPTION_FRAME					\
-	PARA_SITE(PARA_PATCH(pv_irq_ops, PV_IRQ_adjust_exception_frame), \
-		  CLBR_NONE,						\
-		  call PARA_INDIRECT(pv_irq_ops+PV_IRQ_adjust_exception_frame))
-
 #define USERGS_SYSRET64							\
	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_usergs_sysret64),	\
		  CLBR_NONE,						\

@@ -196,9 +196,6 @@ struct pv_irq_ops {
	void (*safe_halt)(void);
	void (*halt)(void);

-#ifdef CONFIG_X86_64
-	void (*adjust_exception_frame)(void);
-#endif
 } __no_randomize_layout;

 struct pv_mmu_ops {

@@ -24,6 +24,9 @@ void entry_SYSENTER_compat(void);
 void __end_entry_SYSENTER_compat(void);
 void entry_SYSCALL_compat(void);
 void entry_INT80_compat(void);
+#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
+void xen_entry_INT80_compat(void);
+#endif
 #endif

 void x86_configure_nx(void);

@@ -238,9 +238,7 @@
 #ifndef __ASSEMBLY__

 extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
-#ifdef CONFIG_TRACING
-# define trace_early_idt_handler_array early_idt_handler_array
-#endif
+extern void early_ignore_irq(void);

 /*
  * Load a segment. Fall back on loading the zero segment if something goes

arch/x86/include/asm/trace/common.h (new file, 16 lines)
@@ -0,0 +1,16 @@
+#ifndef _ASM_TRACE_COMMON_H
+#define _ASM_TRACE_COMMON_H
+
+#ifdef CONFIG_TRACING
+DECLARE_STATIC_KEY_FALSE(trace_pagefault_key);
+#define trace_pagefault_enabled()			\
+	static_branch_unlikely(&trace_pagefault_key)
+DECLARE_STATIC_KEY_FALSE(trace_resched_ipi_key);
+#define trace_resched_ipi_enabled()			\
+	static_branch_unlikely(&trace_resched_ipi_key)
+#else
+static inline bool trace_pagefault_enabled(void) { return false; }
+static inline bool trace_resched_ipi_enabled(void) { return false; }
+#endif
+
+#endif

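trace/common.h replaces the old trace_irq_vector_regfunc()/unregfunc() indirection with per-tracepoint static keys. A sketch of the intended usage, kernel context assumed (the matching reg/unreg definitions live elsewhere in this series, and the fault-handler call site is illustrative):

#include <linux/jump_label.h>
#include <asm/trace/common.h>

DEFINE_STATIC_KEY_FALSE(trace_pagefault_key);

/* Registered as the tracepoint's reg/unreg callbacks */
int trace_pagefault_reg(void)
{
	static_branch_enable(&trace_pagefault_key);
	return 0;
}

void trace_pagefault_unreg(void)
{
	static_branch_disable(&trace_pagefault_key);
}

/* Hot path: one patched branch when tracing is off, no separate trace IDT. */
static void maybe_trace_fault(unsigned long address, struct pt_regs *regs,
			      unsigned long error_code)
{
	if (trace_pagefault_enabled())
		trace_page_fault_user(address, regs, error_code);
}

This is what allows the tracing IDT and all the trace_* handler duplicates below to be deleted.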
@@ -5,9 +5,10 @@
 #define _TRACE_PAGE_FAULT_H

 #include <linux/tracepoint.h>
+#include <asm/trace/common.h>

-extern int trace_irq_vector_regfunc(void);
-extern void trace_irq_vector_unregfunc(void);
+extern int trace_pagefault_reg(void);
+extern void trace_pagefault_unreg(void);

 DECLARE_EVENT_CLASS(x86_exceptions,
@@ -37,8 +38,7 @@ DEFINE_EVENT_FN(x86_exceptions, name,				\
	TP_PROTO(unsigned long address,	struct pt_regs *regs,	\
		unsigned long error_code),			\
	TP_ARGS(address, regs, error_code),			\
-	trace_irq_vector_regfunc,				\
-	trace_irq_vector_unregfunc);
+	trace_pagefault_reg, trace_pagefault_unreg);

 DEFINE_PAGE_FAULT_EVENT(page_fault_user);
 DEFINE_PAGE_FAULT_EVENT(page_fault_kernel);

@@ -5,9 +5,12 @@
 #define _TRACE_IRQ_VECTORS_H

 #include <linux/tracepoint.h>
+#include <asm/trace/common.h>

-extern int trace_irq_vector_regfunc(void);
-extern void trace_irq_vector_unregfunc(void);
+#ifdef CONFIG_X86_LOCAL_APIC
+
+extern int trace_resched_ipi_reg(void);
+extern void trace_resched_ipi_unreg(void);

 DECLARE_EVENT_CLASS(x86_irq_vector,
@@ -26,17 +29,24 @@ DECLARE_EVENT_CLASS(x86_irq_vector,
	TP_printk("vector=%d", __entry->vector) );

 #define DEFINE_IRQ_VECTOR_EVENT(name)		 \
 DEFINE_EVENT_FN(x86_irq_vector, name##_entry,	\
	TP_PROTO(int vector),			\
-	TP_ARGS(vector),			\
-	trace_irq_vector_regfunc,		\
-	trace_irq_vector_unregfunc);		\
+	TP_ARGS(vector), NULL, NULL);		\
 DEFINE_EVENT_FN(x86_irq_vector, name##_exit,	\
	TP_PROTO(int vector),			\
-	TP_ARGS(vector),			\
-	trace_irq_vector_regfunc,		\
-	trace_irq_vector_unregfunc);
+	TP_ARGS(vector), NULL, NULL);
+
+#define DEFINE_RESCHED_IPI_EVENT(name)		\
+DEFINE_EVENT_FN(x86_irq_vector, name##_entry,	\
+	TP_PROTO(int vector),			\
+	TP_ARGS(vector),			\
+	trace_resched_ipi_reg,			\
+	trace_resched_ipi_unreg);		\
+DEFINE_EVENT_FN(x86_irq_vector, name##_exit,	\
+	TP_PROTO(int vector),			\
+	TP_ARGS(vector),			\
+	trace_resched_ipi_reg,			\
+	trace_resched_ipi_unreg);

 /*
  * local_timer - called when entering/exiting a local timer interrupt
@@ -44,11 +54,6 @@ DEFINE_EVENT_FN(x86_irq_vector, name##_exit,	\
  */
 DEFINE_IRQ_VECTOR_EVENT(local_timer);

-/*
- * reschedule - called when entering/exiting a reschedule vector handler
- */
-DEFINE_IRQ_VECTOR_EVENT(reschedule);
-
 /*
  * spurious_apic - called when entering/exiting a spurious apic vector handler
  */
@@ -65,6 +70,7 @@ DEFINE_IRQ_VECTOR_EVENT(error_apic);
  */
 DEFINE_IRQ_VECTOR_EVENT(x86_platform_ipi);

+#ifdef CONFIG_IRQ_WORK
 /*
  * irq_work - called when entering/exiting a irq work interrupt
  * vector handler
@@ -81,6 +87,18 @@ DEFINE_IRQ_VECTOR_EVENT(irq_work);
  * 4) goto 1
  */
 TRACE_EVENT_PERF_PERM(irq_work_exit, is_sampling_event(p_event) ? -EPERM : 0);
+#endif
+
+/*
+ * The ifdef is required because that tracepoint macro hell emits tracepoint
+ * code in files which include this header even if the tracepoint is not
+ * enabled. Brilliant stuff that.
+ */
+#ifdef CONFIG_SMP
+/*
+ * reschedule - called when entering/exiting a reschedule vector handler
+ */
+DEFINE_RESCHED_IPI_EVENT(reschedule);

 /*
  * call_function - called when entering/exiting a call function interrupt
@@ -93,24 +111,33 @@ DEFINE_IRQ_VECTOR_EVENT(call_function);
  * single interrupt vector handler
  */
 DEFINE_IRQ_VECTOR_EVENT(call_function_single);
+#endif

+#ifdef CONFIG_X86_MCE_THRESHOLD
 /*
  * threshold_apic - called when entering/exiting a threshold apic interrupt
  * vector handler
  */
 DEFINE_IRQ_VECTOR_EVENT(threshold_apic);
+#endif

+#ifdef CONFIG_X86_MCE_AMD
 /*
  * deferred_error_apic - called when entering/exiting a deferred apic interrupt
  * vector handler
  */
 DEFINE_IRQ_VECTOR_EVENT(deferred_error_apic);
+#endif

+#ifdef CONFIG_X86_THERMAL_VECTOR
 /*
  * thermal_apic - called when entering/exiting a thermal apic interrupt
  * vector handler
  */
 DEFINE_IRQ_VECTOR_EVENT(thermal_apic);
+#endif
+
+#endif /* CONFIG_X86_LOCAL_APIC */

 #undef TRACE_INCLUDE_PATH
 #define TRACE_INCLUDE_PATH .

@@ -13,9 +13,6 @@ asmlinkage void divide_error(void);
 asmlinkage void debug(void);
 asmlinkage void nmi(void);
 asmlinkage void int3(void);
-asmlinkage void xen_debug(void);
-asmlinkage void xen_int3(void);
-asmlinkage void xen_stack_segment(void);
 asmlinkage void overflow(void);
 asmlinkage void bounds(void);
 asmlinkage void invalid_op(void);
@@ -38,22 +35,29 @@ asmlinkage void machine_check(void);
 #endif /* CONFIG_X86_MCE */
 asmlinkage void simd_coprocessor_error(void);

-#ifdef CONFIG_TRACING
-asmlinkage void trace_page_fault(void);
-#define trace_stack_segment stack_segment
-#define trace_divide_error divide_error
-#define trace_bounds bounds
-#define trace_invalid_op invalid_op
-#define trace_device_not_available device_not_available
-#define trace_coprocessor_segment_overrun coprocessor_segment_overrun
-#define trace_invalid_TSS invalid_TSS
-#define trace_segment_not_present segment_not_present
-#define trace_general_protection general_protection
-#define trace_spurious_interrupt_bug spurious_interrupt_bug
-#define trace_coprocessor_error coprocessor_error
-#define trace_alignment_check alignment_check
-#define trace_simd_coprocessor_error simd_coprocessor_error
-#define trace_async_page_fault async_page_fault
+#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
+asmlinkage void xen_divide_error(void);
+asmlinkage void xen_xendebug(void);
+asmlinkage void xen_xenint3(void);
+asmlinkage void xen_nmi(void);
+asmlinkage void xen_overflow(void);
+asmlinkage void xen_bounds(void);
+asmlinkage void xen_invalid_op(void);
+asmlinkage void xen_device_not_available(void);
+asmlinkage void xen_double_fault(void);
+asmlinkage void xen_coprocessor_segment_overrun(void);
+asmlinkage void xen_invalid_TSS(void);
+asmlinkage void xen_segment_not_present(void);
+asmlinkage void xen_stack_segment(void);
+asmlinkage void xen_general_protection(void);
+asmlinkage void xen_page_fault(void);
+asmlinkage void xen_spurious_interrupt_bug(void);
+asmlinkage void xen_coprocessor_error(void);
+asmlinkage void xen_alignment_check(void);
+#ifdef CONFIG_X86_MCE
+asmlinkage void xen_machine_check(void);
+#endif /* CONFIG_X86_MCE */
+asmlinkage void xen_simd_coprocessor_error(void);
 #endif

 dotraplinkage void do_divide_error(struct pt_regs *, long);
@@ -74,14 +78,6 @@ asmlinkage struct pt_regs *sync_regs(struct pt_regs *);
 #endif
 dotraplinkage void do_general_protection(struct pt_regs *, long);
 dotraplinkage void do_page_fault(struct pt_regs *, unsigned long);
-#ifdef CONFIG_TRACING
-dotraplinkage void trace_do_page_fault(struct pt_regs *, unsigned long);
-#else
-static inline void trace_do_page_fault(struct pt_regs *regs, unsigned long error)
-{
-	do_page_fault(regs, error);
-}
-#endif
 dotraplinkage void do_spurious_interrupt_bug(struct pt_regs *, long);
 dotraplinkage void do_coprocessor_error(struct pt_regs *, long);
 dotraplinkage void do_alignment_check(struct pt_regs *, long);

@@ -552,6 +552,8 @@ static inline void
 MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
			struct desc_struct desc)
 {
+	u32 *p = (u32 *) &desc;
+
	mcl->op = __HYPERVISOR_update_descriptor;
	if (sizeof(maddr) == sizeof(long)) {
		mcl->args[0] = maddr;
@@ -559,8 +561,8 @@ MULTI_update_descriptor(struct multicall_entry *mcl, u64 maddr,
	} else {
		mcl->args[0] = maddr;
		mcl->args[1] = maddr >> 32;
-		mcl->args[2] = desc.a;
-		mcl->args[3] = desc.b;
+		mcl->args[2] = *p++;
+		mcl->args[3] = *p;
	}

	trace_xen_mc_entry(mcl, sizeof(maddr) == sizeof(long) ? 2 : 4);

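With desc_struct's a/b accessors gone, MULTI_update_descriptor reads the descriptor's two words through a u32 pointer instead; on little-endian x86 that yields the same pair the old fields did. A standalone check of that equivalence (sketch; it uses memcpy rather than the kernel's direct cast to stay well-defined in userspace):

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	/* An 8-byte descriptor viewed as two words, like the old a/b pair */
	uint64_t raw = 0x00cf9a000000ffffULL;	/* flat 4G code segment */
	uint32_t words[2];

	memcpy(words, &raw, sizeof(raw));

	/* On little-endian x86, words[0]/words[1] match the old .a/.b */
	assert(words[0] == (uint32_t) raw);
	assert(words[1] == (uint32_t) (raw >> 32));
	return 0;
}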
@@ -42,7 +42,7 @@ CFLAGS_irq.o := -I$(src)/../include/asm/trace

 obj-y			:= process_$(BITS).o signal.o
 obj-$(CONFIG_COMPAT)	+= signal_compat.o
-obj-y			+= traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
+obj-y			+= traps.o idt.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y			+= time.o ioport.o dumpstack.o nmi.o
 obj-$(CONFIG_MODIFY_LDT_SYSCALL)	+= ldt.o
 obj-y			+= setup.o x86_init.o i8259.o irqinit.o jump_label.o
@@ -111,6 +111,7 @@ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= paravirt-spinlocks.o
 obj-$(CONFIG_PARAVIRT_CLOCK)	+= pvclock.o
 obj-$(CONFIG_X86_PMEM_LEGACY_DEVICE) += pmem.o

+obj-$(CONFIG_EISA)		+= eisa.o
 obj-$(CONFIG_PCSPKR_PLATFORM)	+= pcspeaker.o

 obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o

@@ -177,8 +177,6 @@ static int disable_apic_timer __initdata;
 int local_apic_timer_c2_ok;
 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);

-int first_system_vector = FIRST_SYSTEM_VECTOR;
-
 /*
  * Debug level, exported for io_apic.c
  */
@@ -599,9 +597,13 @@ static const struct x86_cpu_id deadline_match[] = {

 static void apic_check_deadline_errata(void)
 {
-	const struct x86_cpu_id *m = x86_match_cpu(deadline_match);
+	const struct x86_cpu_id *m;
	u32 rev;

+	if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER))
+		return;
+
+	m = x86_match_cpu(deadline_match);
	if (!m)
		return;

@@ -990,8 +992,7 @@ void setup_secondary_APIC_clock(void)
  */
 static void local_apic_timer_interrupt(void)
 {
-	int cpu = smp_processor_id();
-	struct clock_event_device *evt = &per_cpu(lapic_events, cpu);
+	struct clock_event_device *evt = this_cpu_ptr(&lapic_events);

	/*
	 * Normally we should not be here till LAPIC has been initialized but
@@ -1005,7 +1006,8 @@ static void local_apic_timer_interrupt(void)
	 * spurious.
	 */
	if (!evt->event_handler) {
-		pr_warning("Spurious LAPIC timer interrupt on cpu %d\n", cpu);
+		pr_warning("Spurious LAPIC timer interrupt on cpu %d\n",
+			   smp_processor_id());
		/* Switch it off */
		lapic_timer_shutdown(evt);
		return;
@@ -1031,25 +1033,6 @@ __visible void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
 {
	struct pt_regs *old_regs = set_irq_regs(regs);

-	/*
-	 * NOTE! We'd better ACK the irq immediately,
-	 * because timer handling can be slow.
-	 *
-	 * update_process_times() expects us to have done irq_enter().
-	 * Besides, if we don't timer interrupts ignore the global
-	 * interrupt lock, which is the WrongThing (tm) to do.
-	 */
-	entering_ack_irq();
-	local_apic_timer_interrupt();
-	exiting_irq();
-
-	set_irq_regs(old_regs);
-}
-
-__visible void __irq_entry smp_trace_apic_timer_interrupt(struct pt_regs *regs)
-{
-	struct pt_regs *old_regs = set_irq_regs(regs);
-
	/*
	 * NOTE! We'd better ACK the irq immediately,
	 * because timer handling can be slow.
@@ -1920,10 +1903,14 @@ void __init register_lapic_address(unsigned long address)
 /*
  * This interrupt should _never_ happen with our APIC/SMP architecture
  */
-static void __smp_spurious_interrupt(u8 vector)
+__visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
 {
+	u8 vector = ~regs->orig_ax;
	u32 v;

+	entering_irq();
+	trace_spurious_apic_entry(vector);
+
	/*
	 * Check if this really is a spurious interrupt and ACK it
	 * if it is a vectored one.  Just in case...
@@ -1938,22 +1925,7 @@ static void __smp_spurious_interrupt(u8 vector)
	/* see sw-dev-man vol 3, chapter 7.4.13.5 */
	pr_info("spurious APIC interrupt through vector %02x on CPU#%d, "
		"should never happen.\n", vector, smp_processor_id());
-}
-
-__visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
-{
-	entering_irq();
-	__smp_spurious_interrupt(~regs->orig_ax);
-	exiting_irq();
-}
-
-__visible void __irq_entry smp_trace_spurious_interrupt(struct pt_regs *regs)
-{
-	u8 vector = ~regs->orig_ax;

-	entering_irq();
-	trace_spurious_apic_entry(vector);
-	__smp_spurious_interrupt(vector);
	trace_spurious_apic_exit(vector);
	exiting_irq();
 }
@@ -1961,10 +1933,8 @@ __visible void __irq_entry smp_trace_spurious_interrupt(struct pt_regs *regs)
 /*
  * This interrupt should never happen with our APIC/SMP architecture
  */
-static void __smp_error_interrupt(struct pt_regs *regs)
+__visible void __irq_entry smp_error_interrupt(struct pt_regs *regs)
 {
-	u32 v;
-	u32 i = 0;
	static const char * const error_interrupt_reason[] = {
		"Send CS error",		/* APIC Error Bit 0 */
		"Receive CS error",		/* APIC Error Bit 1 */
@@ -1975,6 +1945,10 @@ static void __smp_error_interrupt(struct pt_regs *regs)
		"Received illegal vector",	/* APIC Error Bit 6 */
		"Illegal register address",	/* APIC Error Bit 7 */
	};
+	u32 v, i = 0;
+
+	entering_irq();
+	trace_error_apic_entry(ERROR_APIC_VECTOR);

	/* First tickle the hardware, only then report what went on. -- REW */
	if (lapic_get_maxlvt() > 3)	/* Due to the Pentium erratum 3AP. */
@@ -1996,20 +1970,6 @@ static void __smp_error_interrupt(struct pt_regs *regs)

	apic_printk(APIC_DEBUG, KERN_CONT "\n");

-}
-
-__visible void __irq_entry smp_error_interrupt(struct pt_regs *regs)
-{
-	entering_irq();
-	__smp_error_interrupt(regs);
-	exiting_irq();
-}
-
-__visible void __irq_entry smp_trace_error_interrupt(struct pt_regs *regs)
-{
-	entering_irq();
-	trace_error_apic_entry(ERROR_APIC_VECTOR);
-	__smp_error_interrupt(regs);
	trace_error_apic_exit(ERROR_APIC_VECTOR);
	exiting_irq();
 }

@@ -1243,7 +1243,7 @@ static void io_apic_print_entries(unsigned int apic, unsigned int nr_entries)
			entry.vector, entry.irr, entry.delivery_status);
		if (ir_entry->format)
			printk(KERN_DEBUG "%s, remapped, I(%04X), Z(%X)\n",
-			       buf, (ir_entry->index << 15) | ir_entry->index,
+			       buf, (ir_entry->index2 << 15) | ir_entry->index,
			       ir_entry->zero);
		else
			printk(KERN_DEBUG "%s, %s, D(%02X), M(%1d)\n",

@@ -166,7 +166,7 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
		offset = current_offset;
 next:
		vector += 16;
-		if (vector >= first_system_vector) {
+		if (vector >= FIRST_SYSTEM_VECTOR) {
			offset = (offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}

@@ -20,7 +20,6 @@ static char syscalls_ia32[] = {
 int main(void)
 {
 #ifdef CONFIG_PARAVIRT
-	OFFSET(PV_IRQ_adjust_exception_frame, pv_irq_ops, adjust_exception_frame);
	OFFSET(PV_CPU_usergs_sysret64, pv_cpu_ops, usergs_sysret64);
	OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
	BLANK();

@@ -1329,15 +1329,6 @@ static __init int setup_disablecpuid(char *arg)
 __setup("clearcpuid=", setup_disablecpuid);

 #ifdef CONFIG_X86_64
-struct desc_ptr idt_descr __ro_after_init = {
-	.size = NR_VECTORS * 16 - 1,
-	.address = (unsigned long) idt_table,
-};
-const struct desc_ptr debug_idt_descr = {
-	.size = NR_VECTORS * 16 - 1,
-	.address = (unsigned long) debug_idt_table,
-};
-
 DEFINE_PER_CPU_FIRST(union irq_stack_union,
		     irq_stack_union) __aligned(PAGE_SIZE) __visible;

@@ -771,24 +771,12 @@ static void __log_error(unsigned int bank, u64 status, u64 addr, u64 misc)
	mce_log(&m);
 }

-static inline void __smp_deferred_error_interrupt(void)
-{
-	inc_irq_stat(irq_deferred_error_count);
-	deferred_error_int_vector();
-}
-
 asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
 {
	entering_irq();
-	__smp_deferred_error_interrupt();
-	exiting_ack_irq();
-}
-
-asmlinkage __visible void __irq_entry smp_trace_deferred_error_interrupt(void)
-{
-	entering_irq();
	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
-	__smp_deferred_error_interrupt();
+	inc_irq_stat(irq_deferred_error_count);
+	deferred_error_int_vector();
	trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
	exiting_ack_irq();
 }

@@ -390,26 +390,12 @@ static void unexpected_thermal_interrupt(void)

 static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;

-static inline void __smp_thermal_interrupt(void)
-{
-	inc_irq_stat(irq_thermal_count);
-	smp_thermal_vector();
-}
-
-asmlinkage __visible void __irq_entry
-smp_thermal_interrupt(struct pt_regs *regs)
-{
-	entering_irq();
-	__smp_thermal_interrupt();
-	exiting_ack_irq();
-}
-
-asmlinkage __visible void __irq_entry
-smp_trace_thermal_interrupt(struct pt_regs *regs)
+asmlinkage __visible void __irq_entry smp_thermal_interrupt(struct pt_regs *r)
 {
	entering_irq();
	trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
-	__smp_thermal_interrupt();
+	inc_irq_stat(irq_thermal_count);
+	smp_thermal_vector();
	trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
	exiting_ack_irq();
 }

@@ -17,24 +17,12 @@ static void default_threshold_interrupt(void)

 void (*mce_threshold_vector)(void) = default_threshold_interrupt;

-static inline void __smp_threshold_interrupt(void)
-{
-	inc_irq_stat(irq_threshold_count);
-	mce_threshold_vector();
-}
-
 asmlinkage __visible void __irq_entry smp_threshold_interrupt(void)
 {
	entering_irq();
-	__smp_threshold_interrupt();
-	exiting_ack_irq();
-}
-
-asmlinkage __visible void __irq_entry smp_trace_threshold_interrupt(void)
-{
-	entering_irq();
	trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
-	__smp_threshold_interrupt();
+	inc_irq_stat(irq_threshold_count);
+	mce_threshold_vector();
	trace_threshold_apic_exit(THRESHOLD_APIC_VECTOR);
	exiting_ack_irq();
 }

|
@ -59,13 +59,8 @@ void hyperv_vector_handler(struct pt_regs *regs)
|
|||
void hv_setup_vmbus_irq(void (*handler)(void))
|
||||
{
|
||||
vmbus_handler = handler;
|
||||
/*
|
||||
* Setup the IDT for hypervisor callback. Prevent reallocation
|
||||
* at module reload.
|
||||
*/
|
||||
if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
|
||||
alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
|
||||
hyperv_callback_vector);
|
||||
/* Setup the IDT for hypervisor callback */
|
||||
alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
|
||||
}
|
||||
|
||||
void hv_remove_vmbus_irq(void)
|
||||
|
|
arch/x86/kernel/eisa.c (new file, 19 lines)
@@ -0,0 +1,19 @@
+/*
+ * EISA specific code
+ *
+ * This file is licensed under the GPL V2
+ */
+#include <linux/ioport.h>
+#include <linux/eisa.h>
+#include <linux/io.h>
+
+static __init int eisa_bus_probe(void)
+{
+	void __iomem *p = ioremap(0x0FFFD9, 4);
+
+	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
+		EISA_bus = 1;
+	iounmap(p);
+	return 0;
+}
+subsys_initcall(eisa_bus_probe);

@@ -10,6 +10,7 @@
 #include <linux/mm.h>
 #include <linux/memblock.h>

+#include <asm/desc.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
 #include <asm/e820/api.h>
@@ -30,6 +31,9 @@ static void __init i386_default_early_setup(void)
 asmlinkage __visible void __init i386_start_kernel(void)
 {
	cr4_init_shadow();
+
+	idt_setup_early_handler();
+
	sanitize_boot_params(&boot_params);

	x86_early_init_platform_quirks();

|
@ -311,8 +311,6 @@ static void __init copy_bootdata(char *real_mode_data)
|
|||
|
||||
asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
|
||||
{
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Build-time sanity checks on the kernel image and module
|
||||
* area mappings. (these are purely build-time and produce no code)
|
||||
|
@ -345,9 +343,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
|
|||
|
||||
kasan_early_init();
|
||||
|
||||
for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
|
||||
set_intr_gate(i, early_idt_handler_array[i]);
|
||||
load_idt((const struct desc_ptr *)&idt_descr);
|
||||
idt_setup_early_handler();
|
||||
|
||||
copy_bootdata(__va(real_mode_data));
|
||||
|
||||
|
|
|
@@ -345,7 +345,6 @@ ENTRY(startup_32_smp)
	movl %eax,%cr0

	lgdt early_gdt_descr
-	lidt idt_descr
	ljmp $(__KERNEL_CS),$1f
 1:	movl $(__KERNEL_DS),%eax	# reload all the segment registers
	movl %eax,%ss			# after changing gdt.
@@ -378,37 +377,6 @@ ENDPROC(startup_32_smp)
  */
 __INIT
 setup_once:
-	/*
-	 * Set up a idt with 256 interrupt gates that push zero if there
-	 * is no error code and then jump to early_idt_handler_common.
-	 * It doesn't actually load the idt - that needs to be done on
-	 * each CPU. Interrupts are enabled elsewhere, when we can be
-	 * relatively sure everything is ok.
-	 */
-
-	movl $idt_table,%edi
-	movl $early_idt_handler_array,%eax
-	movl $NUM_EXCEPTION_VECTORS,%ecx
-1:
-	movl %eax,(%edi)
-	movl %eax,4(%edi)
-	/* interrupt gate, dpl=0, present */
-	movl $(0x8E000000 + __KERNEL_CS),2(%edi)
-	addl $EARLY_IDT_HANDLER_SIZE,%eax
-	addl $8,%edi
-	loop 1b
-
-	movl $256 - NUM_EXCEPTION_VECTORS,%ecx
-	movl $ignore_int,%edx
-	movl $(__KERNEL_CS << 16),%eax
-	movw %dx,%ax		/* selector = 0x0010 = cs */
-	movw $0x8E00,%dx	/* interrupt gate - dpl=0, present */
-2:
-	movl %eax,(%edi)
-	movl %edx,4(%edi)
-	addl $8,%edi
-	loop 2b
-
 #ifdef CONFIG_CC_STACKPROTECTOR
	/*
	 * Configure the stack canary. The linker can't handle this by
@@ -497,8 +465,7 @@ early_idt_handler_common:
 ENDPROC(early_idt_handler_common)

 /* This is the default interrupt "handler" :-) */
-	ALIGN
-ignore_int:
+ENTRY(early_ignore_irq)
	cld
 #ifdef CONFIG_PRINTK
	pushl %eax
@@ -533,7 +500,8 @@ ignore_int:
 hlt_loop:
	hlt
	jmp hlt_loop
-ENDPROC(ignore_int)
+ENDPROC(early_ignore_irq)
+
 __INITDATA
	.align 4
 GLOBAL(early_recursion_flag)
@@ -622,7 +590,6 @@ int_msg:

 .data
 .globl boot_gdt_descr
-.globl idt_descr

	ALIGN
 # early boot GDT descriptor (must use 1:1 address mapping)
@@ -631,11 +598,6 @@ boot_gdt_descr:
	.word __BOOT_DS+7
	.long boot_gdt - __PAGE_OFFSET

-	.word 0				# 32-bit align idt_desc.address
-idt_descr:
-	.word IDT_ENTRIES*8-1		# idt contains 256 entries
-	.long idt_table
-
 # boot GDT descriptor (later on used by CPU#0):
	.word 0				# 32 bit align gdt_desc.address
 ENTRY(early_gdt_descr)

371
arch/x86/kernel/idt.c
Normal file
371
arch/x86/kernel/idt.c
Normal file
|
@ -0,0 +1,371 @@
|
|||
/*
|
||||
* Interrupt descriptor table related code
|
||||
*
|
||||
* This file is licensed under the GPL V2
|
||||
*/
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
#include <asm/traps.h>
|
||||
#include <asm/proto.h>
|
||||
#include <asm/desc.h>
|
||||
|
||||
struct idt_data {
|
||||
unsigned int vector;
|
||||
unsigned int segment;
|
||||
struct idt_bits bits;
|
||||
const void *addr;
|
||||
};
|
||||
|
||||
#define DPL0 0x0
|
||||
#define DPL3 0x3
|
||||
|
||||
#define DEFAULT_STACK 0
|
||||
|
||||
#define G(_vector, _addr, _ist, _type, _dpl, _segment) \
|
||||
{ \
|
||||
.vector = _vector, \
|
||||
.bits.ist = _ist, \
|
||||
.bits.type = _type, \
|
||||
.bits.dpl = _dpl, \
|
||||
.bits.p = 1, \
|
||||
.addr = _addr, \
|
||||
.segment = _segment, \
|
||||
}
|
||||
|
||||
/* Interrupt gate */
|
||||
#define INTG(_vector, _addr) \
|
||||
G(_vector, _addr, DEFAULT_STACK, GATE_INTERRUPT, DPL0, __KERNEL_CS)
|
||||
|
||||
/* System interrupt gate */
|
||||
#define SYSG(_vector, _addr) \
|
||||
G(_vector, _addr, DEFAULT_STACK, GATE_INTERRUPT, DPL3, __KERNEL_CS)
|
||||
|
||||
/* Interrupt gate with interrupt stack */
|
||||
#define ISTG(_vector, _addr, _ist) \
|
||||
G(_vector, _addr, _ist, GATE_INTERRUPT, DPL0, __KERNEL_CS)
|
||||
|
||||
/* System interrupt gate with interrupt stack */
|
||||
#define SISTG(_vector, _addr, _ist) \
|
||||
G(_vector, _addr, _ist, GATE_INTERRUPT, DPL3, __KERNEL_CS)
|
||||
|
||||
/* Task gate */
|
||||
#define TSKG(_vector, _gdt) \
|
||||
G(_vector, NULL, DEFAULT_STACK, GATE_TASK, DPL0, _gdt << 3)
|
||||
|
||||
/*
 * Early traps running on the DEFAULT_STACK because the other interrupt
 * stacks work only after cpu_init().
 */
static const __initdata struct idt_data early_idts[] = {
	INTG(X86_TRAP_DB,		debug),
	SYSG(X86_TRAP_BP,		int3),
#ifdef CONFIG_X86_32
	INTG(X86_TRAP_PF,		page_fault),
#endif
};

/*
 * The default IDT entries which are set up in trap_init() before
 * cpu_init() is invoked. Interrupt stacks cannot be used at that point and
 * the traps which use them are reinitialized with IST after cpu_init() has
 * set up TSS.
 */
static const __initdata struct idt_data def_idts[] = {
	INTG(X86_TRAP_DE,		divide_error),
	INTG(X86_TRAP_NMI,		nmi),
	INTG(X86_TRAP_BR,		bounds),
	INTG(X86_TRAP_UD,		invalid_op),
	INTG(X86_TRAP_NM,		device_not_available),
	INTG(X86_TRAP_OLD_MF,		coprocessor_segment_overrun),
	INTG(X86_TRAP_TS,		invalid_TSS),
	INTG(X86_TRAP_NP,		segment_not_present),
	INTG(X86_TRAP_SS,		stack_segment),
	INTG(X86_TRAP_GP,		general_protection),
	INTG(X86_TRAP_SPURIOUS,		spurious_interrupt_bug),
	INTG(X86_TRAP_MF,		coprocessor_error),
	INTG(X86_TRAP_AC,		alignment_check),
	INTG(X86_TRAP_XF,		simd_coprocessor_error),

#ifdef CONFIG_X86_32
	TSKG(X86_TRAP_DF,		GDT_ENTRY_DOUBLEFAULT_TSS),
#else
	INTG(X86_TRAP_DF,		double_fault),
#endif
	INTG(X86_TRAP_DB,		debug),
	INTG(X86_TRAP_NMI,		nmi),
	INTG(X86_TRAP_BP,		int3),

#ifdef CONFIG_X86_MCE
	INTG(X86_TRAP_MC,		&machine_check),
#endif

	SYSG(X86_TRAP_OF,		overflow),
#if defined(CONFIG_IA32_EMULATION)
	SYSG(IA32_SYSCALL_VECTOR,	entry_INT80_compat),
#elif defined(CONFIG_X86_32)
	SYSG(IA32_SYSCALL_VECTOR,	entry_INT80_32),
#endif
};

/*
 * The APIC and SMP idt entries
 */
static const __initdata struct idt_data apic_idts[] = {
#ifdef CONFIG_SMP
	INTG(RESCHEDULE_VECTOR,		reschedule_interrupt),
	INTG(CALL_FUNCTION_VECTOR,	call_function_interrupt),
	INTG(CALL_FUNCTION_SINGLE_VECTOR, call_function_single_interrupt),
	INTG(IRQ_MOVE_CLEANUP_VECTOR,	irq_move_cleanup_interrupt),
	INTG(REBOOT_VECTOR,		reboot_interrupt),
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
	INTG(THERMAL_APIC_VECTOR,	thermal_interrupt),
#endif

#ifdef CONFIG_X86_MCE_THRESHOLD
	INTG(THRESHOLD_APIC_VECTOR,	threshold_interrupt),
#endif

#ifdef CONFIG_X86_MCE_AMD
	INTG(DEFERRED_ERROR_VECTOR,	deferred_error_interrupt),
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	INTG(LOCAL_TIMER_VECTOR,	apic_timer_interrupt),
	INTG(X86_PLATFORM_IPI_VECTOR,	x86_platform_ipi),
# ifdef CONFIG_HAVE_KVM
	INTG(POSTED_INTR_VECTOR,	kvm_posted_intr_ipi),
	INTG(POSTED_INTR_WAKEUP_VECTOR, kvm_posted_intr_wakeup_ipi),
	INTG(POSTED_INTR_NESTED_VECTOR, kvm_posted_intr_nested_ipi),
# endif
# ifdef CONFIG_IRQ_WORK
	INTG(IRQ_WORK_VECTOR,		irq_work_interrupt),
# endif
	INTG(SPURIOUS_APIC_VECTOR,	spurious_interrupt),
	INTG(ERROR_APIC_VECTOR,		error_interrupt),
#endif
};

#ifdef CONFIG_X86_64
/*
 * Early traps running on the DEFAULT_STACK because the other interrupt
 * stacks work only after cpu_init().
 */
static const __initdata struct idt_data early_pf_idts[] = {
	INTG(X86_TRAP_PF,		page_fault),
};

/*
 * Override for the debug_idt. Same as the default, but with interrupt
 * stack set to DEFAULT_STACK (0). Required for NMI trap handling.
 */
static const __initdata struct idt_data dbg_idts[] = {
	INTG(X86_TRAP_DB,	debug),
	INTG(X86_TRAP_BP,	int3),
};
#endif

/* Must be page-aligned because the real IDT is used in a fixmap. */
gate_desc idt_table[IDT_ENTRIES] __page_aligned_bss;

struct desc_ptr idt_descr __ro_after_init = {
	.size		= (IDT_ENTRIES * 2 * sizeof(unsigned long)) - 1,
	.address	= (unsigned long) idt_table,
};
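
The .size arithmetic leans on sizeof(unsigned long) so the same expression yields the right limit on both widths: a gate is 8 bytes on 32-bit and 16 bytes on x86-64. A quick sanity check of that arithmetic (userspace sketch):

#include <assert.h>
#include <stdio.h>

#define IDT_ENTRIES 256

int main(void)
{
	size_t limit = (IDT_ENTRIES * 2 * sizeof(unsigned long)) - 1;

	/* LP64: 256 gates * 16 bytes - 1 = 4095; ILP32: 256 * 8 - 1 = 2047 */
	assert(sizeof(unsigned long) == 8 ? limit == 4095 : limit == 2047);
	printf("IDT limit = %zu\n", limit);
	return 0;
}
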
#ifdef CONFIG_X86_64
/* No need to be aligned, but done to keep all IDTs defined the same way. */
gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss;

/*
 * The exceptions which use Interrupt stacks. They are setup after
 * cpu_init() when the TSS has been initialized.
 */
static const __initdata struct idt_data ist_idts[] = {
	ISTG(X86_TRAP_DB,	debug,		DEBUG_STACK),
	ISTG(X86_TRAP_NMI,	nmi,		NMI_STACK),
	SISTG(X86_TRAP_BP,	int3,		DEBUG_STACK),
	ISTG(X86_TRAP_DF,	double_fault,	DOUBLEFAULT_STACK),
#ifdef CONFIG_X86_MCE
	ISTG(X86_TRAP_MC,	&machine_check, MCE_STACK),
#endif
};

/*
 * Override for the debug_idt. Same as the default, but with interrupt
 * stack set to DEFAULT_STACK (0). Required for NMI trap handling.
 */
const struct desc_ptr debug_idt_descr = {
	.size		= IDT_ENTRIES * 16 - 1,
	.address	= (unsigned long) debug_idt_table,
};
#endif

static inline void idt_init_desc(gate_desc *gate, const struct idt_data *d)
{
	unsigned long addr = (unsigned long) d->addr;

	gate->offset_low	= (u16) addr;
	gate->segment		= (u16) d->segment;
	gate->bits		= d->bits;
	gate->offset_middle	= (u16) (addr >> 16);
#ifdef CONFIG_X86_64
	gate->offset_high	= (u32) (addr >> 32);
	gate->reserved		= 0;
#endif
}
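
The handler address is split across three non-adjacent fields of the gate. A minimal userspace sketch of the same split, verifying it is lossless (field names follow the gate layout; the address is a made-up example):

#include <stdint.h>
#include <stdio.h>

struct gate {
	uint16_t offset_low;
	uint16_t offset_middle;
	uint32_t offset_high;
};

int main(void)
{
	uint64_t addr = 0xffffffff81234567ULL;	/* example handler address */
	struct gate g = {
		.offset_low    = (uint16_t)addr,
		.offset_middle = (uint16_t)(addr >> 16),
		.offset_high   = (uint32_t)(addr >> 32),
	};
	uint64_t back = (uint64_t)g.offset_high << 32 |
			(uint64_t)g.offset_middle << 16 | g.offset_low;

	printf("lossless: %d\n", back == addr);	/* prints 1 */
	return 0;
}
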
static void
idt_setup_from_table(gate_desc *idt, const struct idt_data *t, int size, bool sys)
{
	gate_desc desc;

	for (; size > 0; t++, size--) {
		idt_init_desc(&desc, t);
		write_idt_entry(idt, t->vector, &desc);
		if (sys)
			set_bit(t->vector, used_vectors);
	}
}

static void set_intr_gate(unsigned int n, const void *addr)
{
	struct idt_data data;

	BUG_ON(n > 0xFF);

	memset(&data, 0, sizeof(data));
	data.vector	= n;
	data.addr	= addr;
	data.segment	= __KERNEL_CS;
	data.bits.type	= GATE_INTERRUPT;
	data.bits.p	= 1;

	idt_setup_from_table(idt_table, &data, 1, false);
}

/**
 * idt_setup_early_traps - Initialize the idt table with early traps
 *
 * On X8664 these traps do not use interrupt stacks as they can't work
 * before cpu_init() is invoked and sets up TSS. The IST variants are
 * installed after that.
 */
void __init idt_setup_early_traps(void)
{
	idt_setup_from_table(idt_table, early_idts, ARRAY_SIZE(early_idts),
			     true);
	load_idt(&idt_descr);
}

/**
 * idt_setup_traps - Initialize the idt table with default traps
 */
void __init idt_setup_traps(void)
{
	idt_setup_from_table(idt_table, def_idts, ARRAY_SIZE(def_idts), true);
}

#ifdef CONFIG_X86_64
/**
 * idt_setup_early_pf - Initialize the idt table with early pagefault handler
 *
 * On X8664 this does not use interrupt stacks as they can't work before
 * cpu_init() is invoked and sets up TSS. The IST variant is installed
 * after that.
 *
 * FIXME: Why is 32bit and 64bit installing the PF handler at different
 * places in the early setup code?
 */
void __init idt_setup_early_pf(void)
{
	idt_setup_from_table(idt_table, early_pf_idts,
			     ARRAY_SIZE(early_pf_idts), true);
}

/**
 * idt_setup_ist_traps - Initialize the idt table with traps using IST
 */
void __init idt_setup_ist_traps(void)
{
	idt_setup_from_table(idt_table, ist_idts, ARRAY_SIZE(ist_idts), true);
}

/**
 * idt_setup_debugidt_traps - Initialize the debug idt table with debug traps
 */
void __init idt_setup_debugidt_traps(void)
{
	memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);

	idt_setup_from_table(debug_idt_table, dbg_idts, ARRAY_SIZE(dbg_idts), false);
}
#endif

/**
 * idt_setup_apic_and_irq_gates - Setup APIC/SMP and normal interrupt gates
 */
void __init idt_setup_apic_and_irq_gates(void)
{
	int i = FIRST_EXTERNAL_VECTOR;
	void *entry;

	idt_setup_from_table(idt_table, apic_idts, ARRAY_SIZE(apic_idts), true);

	for_each_clear_bit_from(i, used_vectors, FIRST_SYSTEM_VECTOR) {
		entry = irq_entries_start + 8 * (i - FIRST_EXTERNAL_VECTOR);
		set_intr_gate(i, entry);
	}

	for_each_clear_bit_from(i, used_vectors, NR_VECTORS) {
#ifdef CONFIG_X86_LOCAL_APIC
		set_bit(i, used_vectors);
		set_intr_gate(i, spurious_interrupt);
#else
		entry = irq_entries_start + 8 * (i - FIRST_EXTERNAL_VECTOR);
		set_intr_gate(i, entry);
#endif
	}
}
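
The two loops above walk the vectors not yet claimed in used_vectors and point each at its 8-byte stub in irq_entries_start. A portable sketch of the same clear-bit scan (a naive stand-in for for_each_clear_bit_from(); the FIRST_SYSTEM_VECTOR value is an assumption for illustration):

#include <stdio.h>

#define NR_VECTORS		256
#define FIRST_EXTERNAL_VECTOR	0x20
#define FIRST_SYSTEM_VECTOR	0xec	/* assumed value */

static unsigned char used[NR_VECTORS];	/* one byte per vector, simplified */
static char irq_entries_start[8 * (NR_VECTORS - FIRST_EXTERNAL_VECTOR)];

int main(void)
{
	int i, installed = 0;

	used[0x80] = 1;		/* e.g. the INT80 syscall vector is taken */

	for (i = FIRST_EXTERNAL_VECTOR; i < FIRST_SYSTEM_VECTOR; i++) {
		if (used[i])
			continue;	/* for_each_clear_bit_from() skips these */
		/* each entry stub in irq_entries_start is 8 bytes long */
		void *entry = irq_entries_start + 8 * (i - FIRST_EXTERNAL_VECTOR);
		(void)entry;		/* set_intr_gate(i, entry) would go here */
		installed++;
	}
	printf("%d gates pointed at entry stubs\n", installed);
	return 0;
}
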
/**
 * idt_setup_early_handler - Initializes the idt table with early handlers
 */
void __init idt_setup_early_handler(void)
{
	int i;

	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
		set_intr_gate(i, early_idt_handler_array[i]);
#ifdef CONFIG_X86_32
	for ( ; i < NR_VECTORS; i++)
		set_intr_gate(i, early_ignore_irq);
#endif
	load_idt(&idt_descr);
}

/**
 * idt_invalidate - Invalidate interrupt descriptor table
 * @addr:	The virtual address of the 'invalid' IDT
 */
void idt_invalidate(void *addr)
{
	struct desc_ptr idt = { .address = (unsigned long) addr, .size = 0 };

	load_idt(&idt);
}

void __init update_intr_gate(unsigned int n, const void *addr)
{
	if (WARN_ON_ONCE(!test_bit(n, used_vectors)))
		return;
	set_intr_gate(n, addr);
}

void alloc_intr_gate(unsigned int n, const void *addr)
{
	BUG_ON(n < FIRST_SYSTEM_VECTOR);
	if (!test_and_set_bit(n, used_vectors))
		set_intr_gate(n, addr);
}
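
Because alloc_intr_gate() gates installation on test_and_set_bit(), repeat calls for the same vector are harmless, which is why the Xen callback-vector hunk further down can drop its explicit restore-path check. A sketch of that idempotency in portable C (non-atomic stand-in; assumes 64-bit longs):

#include <stdio.h>

static unsigned long used_vectors[4];	/* 256-bit bitmap, simplified */

/* Non-atomic stand-in for the kernel's test_and_set_bit(). */
static int test_and_set_bit(int n, unsigned long *map)
{
	unsigned long bit = 1UL << (n % (8 * sizeof(unsigned long)));
	unsigned long *word = &map[n / (8 * sizeof(unsigned long))];
	int old = !!(*word & bit);

	*word |= bit;
	return old;
}

static void alloc_intr_gate(unsigned int n, const void *addr)
{
	if (!test_and_set_bit(n, used_vectors))
		printf("vector %#x -> %p installed\n", n, (void *)addr);
	else
		printf("vector %#x already claimed, left alone\n", n);
}

int main(void)
{
	/* The second call is a no-op: the restore path needs no test_bit(). */
	alloc_intr_gate(0xf3, (void *)0x1000);
	alloc_intr_gate(0xf3, (void *)0x1000);
	return 0;
}
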
@@ -29,9 +29,6 @@ EXPORT_PER_CPU_SYMBOL(irq_regs);

atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.

@@ -87,13 +84,13 @@ int arch_show_interrupts(struct seq_file *p, int prec)
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_puts(p, "  APIC ICR read retries\n");
#endif
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_puts(p, "  Platform interrupts\n");
	}
#endif
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)

@@ -183,9 +180,9 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
	sum += irq_stats(cpu)->icr_read_retry_count;
#endif
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
#endif
#ifdef CONFIG_SMP
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;

@@ -259,26 +256,26 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
	return 1;
}

#ifdef CONFIG_X86_LOCAL_APIC
/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void __smp_x86_platform_ipi(void)
{
	inc_irq_stat(x86_platform_ipis);

	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
}

__visible void __irq_entry smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	__smp_x86_platform_ipi();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	inc_irq_stat(x86_platform_ipis);
	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	exiting_irq();
	set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_HAVE_KVM
static void dummy_handler(void) {}

@@ -334,19 +331,6 @@ __visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs)
}
#endif

__visible void __irq_entry smp_trace_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	__smp_x86_platform_ipi();
	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	exiting_irq();
	set_irq_regs(old_regs);
}

EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);

#ifdef CONFIG_HOTPLUG_CPU

@@ -431,7 +415,7 @@ int check_irq_vectors_for_cpu_disable(void)
	 * this w/o holding vector_lock.
	 */
	for (vector = FIRST_EXTERNAL_VECTOR;
	     vector < first_system_vector; vector++) {
	     vector < FIRST_SYSTEM_VECTOR; vector++) {
		if (!test_bit(vector, used_vectors) &&
		    IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) {
			if (++count == this_count)
@@ -11,35 +11,23 @@
#include <asm/trace/irq_vectors.h>
#include <linux/interrupt.h>

static inline void __smp_irq_work_interrupt(void)
{
	inc_irq_stat(apic_irq_work_irqs);
	irq_work_run();
}

#ifdef CONFIG_X86_LOCAL_APIC
__visible void __irq_entry smp_irq_work_interrupt(struct pt_regs *regs)
{
	ipi_entering_ack_irq();
	__smp_irq_work_interrupt();
	exiting_irq();
}

__visible void __irq_entry smp_trace_irq_work_interrupt(struct pt_regs *regs)
{
	ipi_entering_ack_irq();
	trace_irq_work_entry(IRQ_WORK_VECTOR);
	__smp_irq_work_interrupt();
	inc_irq_stat(apic_irq_work_irqs);
	irq_work_run();
	trace_irq_work_exit(IRQ_WORK_VECTOR);
	exiting_irq();
}

void arch_irq_work_raise(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	if (!arch_irq_work_has_interrupt())
		return;

	apic->send_IPI_self(IRQ_WORK_VECTOR);
	apic_wait_icr_idle();
#endif
}
#endif
@@ -55,18 +55,6 @@ DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
	[0 ... NR_VECTORS - 1] = VECTOR_UNUSED,
};

int vector_used_by_percpu_irq(unsigned int vector)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (!IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector]))
			return 1;
	}

	return 0;
}

void __init init_ISA_irqs(void)
{
	struct irq_chip *chip = legacy_pic->chip;

@@ -99,100 +87,12 @@ void __init init_IRQ(void)
	x86_init.irqs.intr_init();
}

static void __init smp_intr_init(void)
{
#ifdef CONFIG_SMP
	/*
	 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
	 * IPI, driven by wakeup.
	 */
	alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

	/* IPI for generic function call */
	alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);

	/* IPI for generic single function call */
	alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
			call_function_single_interrupt);

	/* Low priority IPI to cleanup after moving an irq */
	set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
	set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);

	/* IPI used for rebooting/stopping */
	alloc_intr_gate(REBOOT_VECTOR, reboot_interrupt);
#endif /* CONFIG_SMP */
}

static void __init apic_intr_init(void)
{
	smp_intr_init();

#ifdef CONFIG_X86_THERMAL_VECTOR
	alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
#endif

#ifdef CONFIG_X86_MCE_AMD
	alloc_intr_gate(DEFERRED_ERROR_VECTOR, deferred_error_interrupt);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
	/* self generated IPI for local APIC timer */
	alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);

	/* IPI for X86 platform specific use */
	alloc_intr_gate(X86_PLATFORM_IPI_VECTOR, x86_platform_ipi);
#ifdef CONFIG_HAVE_KVM
	/* IPI for KVM to deliver posted interrupt */
	alloc_intr_gate(POSTED_INTR_VECTOR, kvm_posted_intr_ipi);
	/* IPI for KVM to deliver interrupt to wake up tasks */
	alloc_intr_gate(POSTED_INTR_WAKEUP_VECTOR, kvm_posted_intr_wakeup_ipi);
	/* IPI for KVM to deliver nested posted interrupt */
	alloc_intr_gate(POSTED_INTR_NESTED_VECTOR, kvm_posted_intr_nested_ipi);
#endif

	/* IPI vectors for APIC spurious and error interrupts */
	alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
	alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);

	/* IRQ work interrupts: */
# ifdef CONFIG_IRQ_WORK
	alloc_intr_gate(IRQ_WORK_VECTOR, irq_work_interrupt);
# endif

#endif
}

void __init native_init_IRQ(void)
{
	int i;

	/* Execute any quirks before the call gates are initialised: */
	x86_init.irqs.pre_vector_init();

	apic_intr_init();

	/*
	 * Cover the whole vector space, no vector can escape
	 * us. (some of these will be overridden and become
	 * 'special' SMP interrupts)
	 */
	i = FIRST_EXTERNAL_VECTOR;
#ifndef CONFIG_X86_LOCAL_APIC
#define first_system_vector NR_VECTORS
#endif
	for_each_clear_bit_from(i, used_vectors, first_system_vector) {
		/* IA32_SYSCALL_VECTOR could be used in trap_init already. */
		set_intr_gate(i, irq_entries_start +
				8 * (i - FIRST_EXTERNAL_VECTOR));
	}
#ifdef CONFIG_X86_LOCAL_APIC
	for_each_clear_bit_from(i, used_vectors, NR_VECTORS)
		set_intr_gate(i, spurious_interrupt);
#endif
	idt_setup_apic_and_irq_gates();

	if (!acpi_ioapic && !of_ioapic && nr_legacy_irqs())
		setup_irq(2, &irq2);
@@ -263,7 +263,7 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)

	switch (kvm_read_and_reset_pf_reason()) {
	default:
		trace_do_page_fault(regs, error_code);
		do_page_fault(regs, error_code);
		break;
	case KVM_PV_REASON_PAGE_NOT_PRESENT:
		/* page is swapped out by the host. */

@@ -455,7 +455,7 @@ static int kvm_cpu_down_prepare(unsigned int cpu)

static void __init kvm_apf_trap_init(void)
{
	set_intr_gate(14, async_page_fault);
	update_intr_gate(X86_TRAP_PF, async_page_fault);
}

void __init kvm_guest_init(void)
@@ -26,18 +26,6 @@
#include <asm/set_memory.h>
#include <asm/debugreg.h>

static void set_idt(void *newidt, __u16 limit)
{
	struct desc_ptr curidt;

	/* ia32 supports unaligned loads & stores */
	curidt.size    = limit;
	curidt.address = (unsigned long)newidt;

	load_idt(&curidt);
}

static void set_gdt(void *newgdt, __u16 limit)
{
	struct desc_ptr curgdt;

@@ -245,7 +233,7 @@ void machine_kexec(struct kimage *image)
	 * If you want to load them you must set up your own idt & gdt.
	 */
	set_gdt(phys_to_virt(0), 0);
	set_idt(phys_to_virt(0), 0);
	idt_invalidate(phys_to_virt(0));

	/* now call it */
	image->start = relocate_kernel_ptr((unsigned long)image->head,
@@ -319,9 +319,6 @@ __visible struct pv_irq_ops pv_irq_ops = {
	.irq_enable = __PV_IS_CALLEE_SAVE(native_irq_enable),
	.safe_halt = native_safe_halt,
	.halt = native_halt,
#ifdef CONFIG_X86_64
	.adjust_exception_frame = paravirt_nop,
#endif
};

__visible struct pv_cpu_ops pv_cpu_ops = {
@@ -38,8 +38,6 @@
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

static const struct desc_ptr no_idt = {};

/*
 * This is set if we need to go through the 'emergency' path.
 * When machine_emergency_restart() is called, we may be on

@@ -638,7 +636,7 @@ static void native_machine_emergency_restart(void)
		break;

	case BOOT_TRIPLE:
		load_idt(&no_idt);
		idt_invalidate(NULL);
		__asm__ __volatile__("int3");

		/* We're probably dead after this, but... */
@@ -900,7 +900,7 @@ void __init setup_arch(char **cmdline_p)
	 */
	olpc_ofw_detect();

	early_trap_init();
	idt_setup_early_traps();
	early_cpu_init();
	early_ioremap_init();

@@ -1171,7 +1171,7 @@ void __init setup_arch(char **cmdline_p)

	init_mem_mapping();

	early_trap_pf_init();
	idt_setup_early_pf();

	/*
	 * Update mmu_cr4_features (and, indirectly, trampoline_cr4_features)
@@ -155,13 +155,10 @@ static void __init pcpup_populate_pte(unsigned long addr)
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;
	struct desc_struct d = GDT_ENTRY_INIT(0x8092, per_cpu_offset(cpu),
					      0xFFFFF);

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	gdt.s = 1;
	write_gdt_entry(get_cpu_gdt_rw(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_PERCPU, &d, DESCTYPE_S);
#endif
}
@@ -254,84 +254,45 @@ finish:
}

/*
 * Reschedule call back.
 * Reschedule call back. KVM uses this interrupt to force a cpu out of
 * guest mode
 */
static inline void __smp_reschedule_interrupt(void)
{
	inc_irq_stat(irq_resched_count);
	scheduler_ipi();
}

__visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	__smp_reschedule_interrupt();
	/*
	 * KVM uses this interrupt to force a cpu out of guest mode
	 */
}
	inc_irq_stat(irq_resched_count);

__visible void __irq_entry smp_trace_reschedule_interrupt(struct pt_regs *regs)
{
	/*
	 * Need to call irq_enter() before calling the trace point.
	 * __smp_reschedule_interrupt() calls irq_enter/exit() too (in
	 * scheduler_ipi(). This is OK, since those functions are allowed
	 * to nest.
	 */
	ipi_entering_ack_irq();
	trace_reschedule_entry(RESCHEDULE_VECTOR);
	__smp_reschedule_interrupt();
	trace_reschedule_exit(RESCHEDULE_VECTOR);
	exiting_irq();
	/*
	 * KVM uses this interrupt to force a cpu out of guest mode
	 */
}

static inline void __smp_call_function_interrupt(void)
{
	generic_smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	if (trace_resched_ipi_enabled()) {
		/*
		 * scheduler_ipi() might call irq_enter() as well, but
		 * nested calls are fine.
		 */
		irq_enter();
		trace_reschedule_entry(RESCHEDULE_VECTOR);
		scheduler_ipi();
		trace_reschedule_exit(RESCHEDULE_VECTOR);
		irq_exit();
		return;
	}
	scheduler_ipi();
}

__visible void __irq_entry smp_call_function_interrupt(struct pt_regs *regs)
{
	ipi_entering_ack_irq();
	__smp_call_function_interrupt();
	exiting_irq();
}

__visible void __irq_entry
smp_trace_call_function_interrupt(struct pt_regs *regs)
{
	ipi_entering_ack_irq();
	trace_call_function_entry(CALL_FUNCTION_VECTOR);
	__smp_call_function_interrupt();
	inc_irq_stat(irq_call_count);
	generic_smp_call_function_interrupt();
	trace_call_function_exit(CALL_FUNCTION_VECTOR);
	exiting_irq();
}

static inline void __smp_call_function_single_interrupt(void)
{
	generic_smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
}

__visible void __irq_entry
smp_call_function_single_interrupt(struct pt_regs *regs)
{
	ipi_entering_ack_irq();
	__smp_call_function_single_interrupt();
	exiting_irq();
}

__visible void __irq_entry
smp_trace_call_function_single_interrupt(struct pt_regs *regs)
__visible void __irq_entry smp_call_function_single_interrupt(struct pt_regs *r)
{
	ipi_entering_ack_irq();
	trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
	__smp_call_function_single_interrupt();
	inc_irq_stat(irq_call_count);
	generic_smp_call_function_single_interrupt();
	trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
	exiting_irq();
}
@@ -93,7 +93,7 @@ static void set_tls_desc(struct task_struct *p, int idx,

	while (n-- > 0) {
		if (LDT_empty(info) || LDT_zero(info)) {
			desc->a = desc->b = 0;
			memset(desc, 0, sizeof(*desc));
		} else {
			fill_ldt(desc, info);
@@ -4,57 +4,38 @@
 * Copyright (C) 2013 Seiji Aguchi <seiji.aguchi@hds.com>
 *
 */
#include <asm/hw_irq.h>
#include <asm/desc.h>
#include <linux/jump_label.h>
#include <linux/atomic.h>

atomic_t trace_idt_ctr = ATOMIC_INIT(0);
struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
				(unsigned long) trace_idt_table };
#include <asm/hw_irq.h>
#include <asm/desc.h>

/* No need to be aligned, but done to keep all IDTs defined the same way. */
gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
DEFINE_STATIC_KEY_FALSE(trace_pagefault_key);

static int trace_irq_vector_refcount;
static DEFINE_MUTEX(irq_vector_mutex);

static void set_trace_idt_ctr(int val)
int trace_pagefault_reg(void)
{
	atomic_set(&trace_idt_ctr, val);
	/* Ensure the trace_idt_ctr is set before sending IPI */
	wmb();
}

static void switch_idt(void *arg)
{
	unsigned long flags;

	local_irq_save(flags);
	load_current_idt();
	local_irq_restore(flags);
}

int trace_irq_vector_regfunc(void)
{
	mutex_lock(&irq_vector_mutex);
	if (!trace_irq_vector_refcount) {
		set_trace_idt_ctr(1);
		smp_call_function(switch_idt, NULL, 0);
		switch_idt(NULL);
	}
	trace_irq_vector_refcount++;
	mutex_unlock(&irq_vector_mutex);
	static_branch_inc(&trace_pagefault_key);
	return 0;
}

void trace_irq_vector_unregfunc(void)
void trace_pagefault_unreg(void)
{
	mutex_lock(&irq_vector_mutex);
	trace_irq_vector_refcount--;
	if (!trace_irq_vector_refcount) {
		set_trace_idt_ctr(0);
		smp_call_function(switch_idt, NULL, 0);
		switch_idt(NULL);
	}
	mutex_unlock(&irq_vector_mutex);
	static_branch_dec(&trace_pagefault_key);
}

#ifdef CONFIG_SMP

DEFINE_STATIC_KEY_FALSE(trace_resched_ipi_key);

int trace_resched_ipi_reg(void)
{
	static_branch_inc(&trace_resched_ipi_key);
	return 0;
}

void trace_resched_ipi_unreg(void)
{
	static_branch_dec(&trace_resched_ipi_key);
}

#endif
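
The whole refcounted trace-IDT switching machinery collapses into bare static-branch counts: registration is now just a counted enable that the handlers test. A portable sketch of the counting semantics (std atomics standing in for the kernel's static keys, which patch the code itself rather than test a variable):

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for DEFINE_STATIC_KEY_FALSE(trace_pagefault_key). */
static atomic_int trace_pagefault_key;

static int trace_pagefault_reg(void)
{
	atomic_fetch_add(&trace_pagefault_key, 1);	/* static_branch_inc() */
	return 0;
}

static void trace_pagefault_unreg(void)
{
	atomic_fetch_sub(&trace_pagefault_key, 1);	/* static_branch_dec() */
}

static int trace_pagefault_enabled(void)
{
	return atomic_load(&trace_pagefault_key) > 0;
}

int main(void)
{
	trace_pagefault_reg();
	printf("enabled after reg:   %d\n", trace_pagefault_enabled());
	trace_pagefault_unreg();
	printf("enabled after unreg: %d\n", trace_pagefault_enabled());
	return 0;
}
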
@@ -38,11 +38,6 @@
#include <linux/smp.h>
#include <linux/io.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#if defined(CONFIG_EDAC)
#include <linux/edac.h>
#endif

@@ -70,20 +65,13 @@
#include <asm/x86_init.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>

/* No need to be aligned, but done to keep all IDTs defined the same way. */
gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
#else
#include <asm/processor-flags.h>
#include <asm/setup.h>
#include <asm/proto.h>
#endif

/* Must be page-aligned because the real IDT is used in a fixmap. */
gate_desc idt_table[NR_VECTORS] __page_aligned_bss;

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

static inline void cond_local_irq_enable(struct pt_regs *regs)
{

@@ -935,87 +923,9 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code)
}
#endif

/* Set of traps needed for early debugging. */
void __init early_trap_init(void)
{
	/*
	 * Don't use IST to set DEBUG_STACK as it doesn't work until TSS
	 * is ready in cpu_init() <-- trap_init(). Before trap_init(),
	 * CPU runs at ring 0 so it is impossible to hit an invalid
	 * stack. Using the original stack works well enough at this
	 * early stage. DEBUG_STACK will be equipped after cpu_init() in
	 * trap_init().
	 *
	 * We don't need to set trace_idt_table like set_intr_gate(),
	 * since we don't have trace_debug and it will be reset to
	 * 'debug' in trap_init() by set_intr_gate_ist().
	 */
	set_intr_gate_notrace(X86_TRAP_DB, debug);
	/* int3 can be called from all */
	set_system_intr_gate(X86_TRAP_BP, &int3);
#ifdef CONFIG_X86_32
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
	load_idt(&idt_descr);
}

void __init early_trap_pf_init(void)
{
#ifdef CONFIG_X86_64
	set_intr_gate(X86_TRAP_PF, page_fault);
#endif
}

void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = early_ioremap(0x0FFFD9, 4);

	if (readl(p) == 'E' + ('I'<<8) + ('S'<<16) + ('A'<<24))
		EISA_bus = 1;
	early_iounmap(p, 4);
#endif

	set_intr_gate(X86_TRAP_DE, divide_error);
	set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
	/* int4 can be called from all */
	set_system_intr_gate(X86_TRAP_OF, &overflow);
	set_intr_gate(X86_TRAP_BR, bounds);
	set_intr_gate(X86_TRAP_UD, invalid_op);
	set_intr_gate(X86_TRAP_NM, device_not_available);
#ifdef CONFIG_X86_32
	set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
#else
	set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
#endif
	set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
	set_intr_gate(X86_TRAP_TS, invalid_TSS);
	set_intr_gate(X86_TRAP_NP, segment_not_present);
	set_intr_gate(X86_TRAP_SS, stack_segment);
	set_intr_gate(X86_TRAP_GP, general_protection);
	set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
	set_intr_gate(X86_TRAP_MF, coprocessor_error);
	set_intr_gate(X86_TRAP_AC, alignment_check);
#ifdef CONFIG_X86_MCE
	set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
#endif
	set_intr_gate(X86_TRAP_XF, simd_coprocessor_error);

	/* Reserve all the builtin and the syscall vector: */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);

#ifdef CONFIG_IA32_EMULATION
	set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_compat);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif

#ifdef CONFIG_X86_32
	set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_32);
	set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#endif
	idt_setup_traps();

	/*
	 * Set the IDT descriptor to a fixed read-only location, so that the

@@ -1030,20 +940,9 @@ void __init trap_init(void)
	 */
	cpu_init();

	/*
	 * X86_TRAP_DB and X86_TRAP_BP have been set
	 * in early_trap_init(). However, ITS works only after
	 * cpu_init() loads TSS. See comments in early_trap_init().
	 */
	set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
	/* int3 can be called from all */
	set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
	idt_setup_ist_traps();

	x86_init.irqs.trap_init();

#ifdef CONFIG_X86_64
	memcpy(&debug_idt_table, &idt_table, IDT_ENTRIES * 16);
	set_nmi_gate(X86_TRAP_DB, &debug);
	set_nmi_gate(X86_TRAP_BP, &int3);
#endif
	idt_setup_debugidt_traps();
}
@@ -8779,7 +8779,7 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)

	vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
	desc = (gate_desc *)vmx->host_idt_base + vector;
	entry = gate_offset(*desc);
	entry = gate_offset(desc);
	asm volatile(
#ifdef CONFIG_X86_64
		"mov %%" _ASM_SP ", %[sp]\n\t"
@@ -147,7 +147,7 @@ void math_emulate(struct math_emu_info *info)
	}

	code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
	if (SEG_D_SIZE(code_descriptor)) {
	if (code_descriptor.d) {
		/* The above test may be wrong, the book is not clear */
		/* Segmented 32 bit protected mode */
		addr_modes.default_mode = SEG32;

@@ -155,11 +155,10 @@ void math_emulate(struct math_emu_info *info)
		/* 16 bit protected mode */
		addr_modes.default_mode = PM16;
	}
	FPU_EIP += code_base = SEG_BASE_ADDR(code_descriptor);
	code_limit = code_base
	    + (SEG_LIMIT(code_descriptor) +
	       1) * SEG_GRANULARITY(code_descriptor)
	    - 1;
	FPU_EIP += code_base = seg_get_base(&code_descriptor);
	code_limit = seg_get_limit(&code_descriptor) + 1;
	code_limit *= seg_get_granularity(&code_descriptor);
	code_limit += code_base - 1;
	if (code_limit < code_base)
		code_limit = 0xffffffff;
	}
@@ -34,17 +34,43 @@ static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
	return ret;
}

#define SEG_D_SIZE(x)		((x).b & (3 << 21))
#define SEG_G_BIT(x)		((x).b & (1 << 23))
#define SEG_GRANULARITY(x)	(((x).b & (1 << 23)) ? 4096 : 1)
#define SEG_286_MODE(x)		((x).b & ( 0xff000000 | 0xf0000 | (1 << 23)))
#define SEG_BASE_ADDR(s)	(((s).b & 0xff000000) \
				 | (((s).b & 0xff) << 16) | ((s).a >> 16))
#define SEG_LIMIT(s)		(((s).b & 0xff0000) | ((s).a & 0xffff))
#define SEG_EXECUTE_ONLY(s)	(((s).b & ((1 << 11) | (1 << 9))) == (1 << 11))
#define SEG_WRITE_PERM(s)	(((s).b & ((1 << 11) | (1 << 9))) == (1 << 9))
#define SEG_EXPAND_DOWN(s)	(((s).b & ((1 << 11) | (1 << 10))) \
				 == (1 << 10))
#define SEG_TYPE_WRITABLE	(1U << 1)
#define SEG_TYPE_EXPANDS_DOWN	(1U << 2)
#define SEG_TYPE_EXECUTE	(1U << 3)
#define SEG_TYPE_EXPAND_MASK	(SEG_TYPE_EXPANDS_DOWN | SEG_TYPE_EXECUTE)
#define SEG_TYPE_EXECUTE_MASK	(SEG_TYPE_WRITABLE | SEG_TYPE_EXECUTE)

static inline unsigned long seg_get_base(struct desc_struct *d)
{
	unsigned long base = (unsigned long)d->base2 << 24;

	return base | ((unsigned long)d->base1 << 16) | d->base0;
}

static inline unsigned long seg_get_limit(struct desc_struct *d)
{
	return ((unsigned long)d->limit1 << 16) | d->limit0;
}

static inline unsigned long seg_get_granularity(struct desc_struct *d)
{
	return d->g ? 4096 : 1;
}

static inline bool seg_expands_down(struct desc_struct *d)
{
	return (d->type & SEG_TYPE_EXPAND_MASK) == SEG_TYPE_EXPANDS_DOWN;
}

static inline bool seg_execute_only(struct desc_struct *d)
{
	return (d->type & SEG_TYPE_EXECUTE_MASK) == SEG_TYPE_EXECUTE;
}

static inline bool seg_writable(struct desc_struct *d)
{
	return (d->type & SEG_TYPE_EXECUTE_MASK) == SEG_TYPE_WRITABLE;
}

#define I387		(&current->thread.fpu.state)
#define FPU_info	(I387->soft.info)
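
A worked decode using the same field split as the new helpers (userspace sketch; the bitfield layout mirrors desc_struct, and the descriptor contents are made-up example values):

#include <stdint.h>
#include <stdio.h>

/* Minimal stand-in for desc_struct's split base/limit fields. */
struct desc {
	uint16_t limit0, base0;
	unsigned int base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
	unsigned int limit1 : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
};

static unsigned long seg_get_base(struct desc *d)
{
	return (unsigned long)d->base2 << 24 |
	       (unsigned long)d->base1 << 16 | d->base0;
}

static unsigned long seg_get_limit(struct desc *d)
{
	return (unsigned long)d->limit1 << 16 | d->limit0;
}

int main(void)
{
	struct desc d = {
		.base0 = 0x0000, .base1 = 0x10, .base2 = 0x00, /* base 0x100000 */
		.limit0 = 0xffff, .limit1 = 0xf, .g = 1,       /* 4K granular */
	};

	printf("base=%#lx raw limit=%#lx granularity=%u\n",
	       seg_get_base(&d), seg_get_limit(&d), d.g ? 4096 : 1);
	return 0;
}
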
@@ -159,17 +159,18 @@ static long pm_address(u_char FPU_modrm, u_char segment,
	}

	descriptor = FPU_get_ldt_descriptor(addr->selector);
	base_address = SEG_BASE_ADDR(descriptor);
	base_address = seg_get_base(&descriptor);
	address = base_address + offset;
	limit = base_address
	    + (SEG_LIMIT(descriptor) + 1) * SEG_GRANULARITY(descriptor) - 1;
	limit = seg_get_limit(&descriptor) + 1;
	limit *= seg_get_granularity(&descriptor);
	limit += base_address - 1;
	if (limit < base_address)
		limit = 0xffffffff;

	if (SEG_EXPAND_DOWN(descriptor)) {
		if (SEG_G_BIT(descriptor))
	if (seg_expands_down(&descriptor)) {
		if (descriptor.g) {
			seg_top = 0xffffffff;
		else {
		} else {
			seg_top = base_address + (1 << 20);
			if (seg_top < base_address)
				seg_top = 0xffffffff;

@@ -182,8 +183,8 @@ static long pm_address(u_char FPU_modrm, u_char segment,
		    (address > limit) || (address < base_address) ? 0 :
		    ((limit - address) >= 254 ? 255 : limit - address + 1);
	}
	if (SEG_EXECUTE_ONLY(descriptor) ||
	    (!SEG_WRITE_PERM(descriptor) && (FPU_modrm & FPU_WRITE_BIT))) {
	if (seg_execute_only(&descriptor) ||
	    (!seg_writable(&descriptor) && (FPU_modrm & FPU_WRITE_BIT))) {
		access_limit = 0;
	}
	return address;
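
The reordered limit arithmetic makes the 32-bit wrap case explicit. A worked example with concrete numbers (sketch assuming 32-bit unsigned wraparound, as on the emulated hardware):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t base_address = 0xfff00000u;
	uint32_t raw_limit    = 0xfffffu;	/* descriptor limit field */
	uint32_t granularity  = 4096;

	uint32_t limit = raw_limit + 1;	/* number of granules: 0x100000 */
	limit *= granularity;		/* 0x100000 * 4096 wraps to 0 */
	limit += base_address - 1;	/* 0xffefffff after wrapping */

	if (limit < base_address)	/* wrapped: clamp to 4G-1 */
		limit = 0xffffffff;

	printf("effective limit = %#x\n", limit);
	return 0;
}
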
@@ -1258,10 +1258,6 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * This function must have noinline because both callers
 * {,trace_}do_page_fault() have notrace on. Having this an actual function
 * guarantees there's a function trace entry.
 */
static noinline void
__do_page_fault(struct pt_regs *regs, unsigned long error_code,

@@ -1494,27 +1490,6 @@ good_area:
}
NOKPROBE_SYMBOL(__do_page_fault);

dotraplinkage void notrace
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	unsigned long address = read_cr2(); /* Get the faulting address */
	enum ctx_state prev_state;

	/*
	 * We must have this function tagged with __kprobes, notrace and call
	 * read_cr2() before calling anything else. To avoid calling any kind
	 * of tracing machinery before we've observed the CR2 value.
	 *
	 * exception_{enter,exit}() contain all sorts of tracepoints.
	 */

	prev_state = exception_enter();
	__do_page_fault(regs, error_code, address);
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(do_page_fault);

#ifdef CONFIG_TRACING
static nokprobe_inline void
trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
			 unsigned long error_code)

@@ -1525,22 +1500,24 @@ trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
		trace_page_fault_kernel(address, regs, error_code);
}

/*
 * We must have this function blacklisted from kprobes, tagged with notrace
 * and call read_cr2() before calling anything else. To avoid calling any
 * kind of tracing machinery before we've observed the CR2 value.
 *
 * exception_{enter,exit}() contains all sorts of tracepoints.
 */
dotraplinkage void notrace
trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	/*
	 * The exception_enter and tracepoint processing could
	 * trigger another page faults (user space callchain
	 * reading) and destroy the original cr2 value, so read
	 * the faulting address now.
	 */
	unsigned long address = read_cr2();
	unsigned long address = read_cr2(); /* Get the faulting address */
	enum ctx_state prev_state;

	prev_state = exception_enter();
	trace_page_fault_entries(address, regs, error_code);
	if (trace_pagefault_enabled())
		trace_page_fault_entries(address, regs, error_code);

	__do_page_fault(regs, error_code, address);
	exception_exit(prev_state);
}
NOKPROBE_SYMBOL(trace_do_page_fault);
#endif /* CONFIG_TRACING */
NOKPROBE_SYMBOL(do_page_fault);
@@ -501,7 +501,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
static inline bool desc_equal(const struct desc_struct *d1,
			      const struct desc_struct *d2)
{
	return d1->a == d2->a && d1->b == d2->b;
	return !memcmp(d1, d2, sizeof(*d1));
}

static void load_TLS_descriptor(struct thread_struct *t,

@@ -586,59 +586,91 @@ static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
	preempt_enable();
}

#ifdef CONFIG_X86_64
struct trap_array_entry {
	void (*orig)(void);
	void (*xen)(void);
	bool ist_okay;
};

static struct trap_array_entry trap_array[] = {
	{ debug,                       xen_xendebug,                    true },
	{ int3,                        xen_xenint3,                     true },
	{ double_fault,                xen_double_fault,                true },
#ifdef CONFIG_X86_MCE
	{ machine_check,               xen_machine_check,               true },
#endif
	{ nmi,                         xen_nmi,                         true },
	{ overflow,                    xen_overflow,                    false },
#ifdef CONFIG_IA32_EMULATION
	{ entry_INT80_compat,          xen_entry_INT80_compat,          false },
#endif
	{ page_fault,                  xen_page_fault,                  false },
	{ divide_error,                xen_divide_error,                false },
	{ bounds,                      xen_bounds,                      false },
	{ invalid_op,                  xen_invalid_op,                  false },
	{ device_not_available,        xen_device_not_available,        false },
	{ coprocessor_segment_overrun, xen_coprocessor_segment_overrun, false },
	{ invalid_TSS,                 xen_invalid_TSS,                 false },
	{ segment_not_present,         xen_segment_not_present,         false },
	{ stack_segment,               xen_stack_segment,               false },
	{ general_protection,          xen_general_protection,         false },
	{ spurious_interrupt_bug,      xen_spurious_interrupt_bug,      false },
	{ coprocessor_error,           xen_coprocessor_error,           false },
	{ alignment_check,             xen_alignment_check,             false },
	{ simd_coprocessor_error,      xen_simd_coprocessor_error,      false },
};

static bool get_trap_addr(void **addr, unsigned int ist)
{
	unsigned int nr;
	bool ist_okay = false;

	/*
	 * Replace trap handler addresses by Xen specific ones.
	 * Check for known traps using IST and whitelist them.
	 * The debugger ones are the only ones we care about.
	 * Xen will handle faults like double_fault, so we should never see
	 * them. Warn if there's an unexpected IST-using fault handler.
	 */
	for (nr = 0; nr < ARRAY_SIZE(trap_array); nr++) {
		struct trap_array_entry *entry = trap_array + nr;

		if (*addr == entry->orig) {
			*addr = entry->xen;
			ist_okay = entry->ist_okay;
			break;
		}
	}

	if (WARN_ON(ist != 0 && !ist_okay))
		return false;

	return true;
}
#endif
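
The table lookup replaces the old if/else-if chain with data. A portable sketch of the same substitute-and-whitelist shape (mock handlers; the kernel's WARN_ON() is elided to a plain return value):

#include <stdbool.h>
#include <stdio.h>

/* Mock entry points standing in for the native and Xen handlers. */
static void native_debug(void) { }
static void xen_debug_thunk(void) { }

struct trap_array_entry {
	void (*orig)(void);
	void (*xen)(void);
	bool ist_okay;
};

static struct trap_array_entry trap_array[] = {
	{ native_debug, xen_debug_thunk, true },
};

static bool get_trap_addr(void **addr, unsigned int ist)
{
	bool ist_okay = false;
	unsigned int nr;

	for (nr = 0; nr < sizeof(trap_array) / sizeof(trap_array[0]); nr++) {
		if (*addr == (void *)trap_array[nr].orig) {
			*addr = (void *)trap_array[nr].xen;
			ist_okay = trap_array[nr].ist_okay;
			break;
		}
	}
	return !(ist != 0 && !ist_okay);	/* WARN_ON() elided */
}

int main(void)
{
	void *addr = (void *)native_debug;
	bool ok = get_trap_addr(&addr, 1);

	printf("ok=%d substituted=%d\n", ok, addr == (void *)xen_debug_thunk);
	return 0;
}
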
static int cvt_gate_to_trap(int vector, const gate_desc *val,
			    struct trap_info *info)
{
	unsigned long addr;

	if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT)
	if (val->bits.type != GATE_TRAP && val->bits.type != GATE_INTERRUPT)
		return 0;

	info->vector = vector;

	addr = gate_offset(*val);
	addr = gate_offset(val);
#ifdef CONFIG_X86_64
	/*
	 * Look for known traps using IST, and substitute them
	 * appropriately.  The debugger ones are the only ones we care
	 * about.  Xen will handle faults like double_fault,
	 * so we should never see them.  Warn if
	 * there's an unexpected IST-using fault handler.
	 */
	if (addr == (unsigned long)debug)
		addr = (unsigned long)xen_debug;
	else if (addr == (unsigned long)int3)
		addr = (unsigned long)xen_int3;
	else if (addr == (unsigned long)stack_segment)
		addr = (unsigned long)xen_stack_segment;
	else if (addr == (unsigned long)double_fault) {
		/* Don't need to handle these */
	if (!get_trap_addr((void **)&addr, val->bits.ist))
		return 0;
#ifdef CONFIG_X86_MCE
	} else if (addr == (unsigned long)machine_check) {
		/*
		 * when xen hypervisor inject vMCE to guest,
		 * use native mce handler to handle it
		 */
		;
#endif
	} else if (addr == (unsigned long)nmi)
		/*
		 * Use the native version as well.
		 */
		;
	else {
		/* Some other trap using IST? */
		if (WARN_ON(val->ist != 0))
			return 0;
	}
#endif /* CONFIG_X86_64 */
	info->address = addr;

	info->cs = gate_segment(*val);
	info->flags = val->dpl;
	info->cs = gate_segment(val);
	info->flags = val->bits.dpl;
	/* interrupt gates clear IF */
	if (val->type == GATE_INTERRUPT)
	if (val->bits.type == GATE_INTERRUPT)
		info->flags |= 1 << 2;

	return 1;
@@ -123,9 +123,6 @@ static const struct pv_irq_ops xen_irq_ops __initconst = {

	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
#ifdef CONFIG_X86_64
	.adjust_exception_frame = xen_adjust_exception_frame,
#endif
};

void __init xen_init_irq_ops(void)
@@ -16,11 +16,42 @@

#include <linux/linkage.h>

ENTRY(xen_adjust_exception_frame)
	mov 8+0(%rsp), %rcx
	mov 8+8(%rsp), %r11
	ret $16
ENDPROC(xen_adjust_exception_frame)
.macro xen_pv_trap name
ENTRY(xen_\name)
	pop %rcx
	pop %r11
	jmp \name
END(xen_\name)
.endm

xen_pv_trap divide_error
xen_pv_trap debug
xen_pv_trap xendebug
xen_pv_trap int3
xen_pv_trap xenint3
xen_pv_trap nmi
xen_pv_trap overflow
xen_pv_trap bounds
xen_pv_trap invalid_op
xen_pv_trap device_not_available
xen_pv_trap double_fault
xen_pv_trap coprocessor_segment_overrun
xen_pv_trap invalid_TSS
xen_pv_trap segment_not_present
xen_pv_trap stack_segment
xen_pv_trap general_protection
xen_pv_trap page_fault
xen_pv_trap spurious_interrupt_bug
xen_pv_trap coprocessor_error
xen_pv_trap alignment_check
#ifdef CONFIG_X86_MCE
xen_pv_trap machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap entry_INT80_compat
#endif
xen_pv_trap hypervisor_callback
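
For clarity, each invocation of the macro above expands mechanically to a three-instruction thunk that pops the two extra words Xen pushes onto the exception frame and jumps to the native entry point; e.g. "xen_pv_trap divide_error" becomes:

ENTRY(xen_divide_error)
	pop %rcx
	pop %r11
	jmp divide_error
END(xen_divide_error)
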
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
@@ -138,7 +138,6 @@ __visible void xen_restore_fl_direct(unsigned long);
__visible void xen_iret(void);
__visible void xen_sysret32(void);
__visible void xen_sysret64(void);
__visible void xen_adjust_exception_frame(void);

extern int xen_panic_handler_init(void);
@@ -1653,10 +1653,8 @@ void xen_callback_vector(void)
			return;
		}
		pr_info("Xen HVM callback vector for event delivery is enabled\n");
		/* in the restore case the vector has already been allocated */
		if (!test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors))
			alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
					xen_hvm_callback_vector);
		alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR,
				xen_hvm_callback_vector);
	}
}
#else