Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
 "With over 300 commits it's been a busy cycle - with most of the work
  concentrated on the tooling side (as it should).

  The main kernel side enhancements were:

   - Add per event callchain limit: Recently we introduced a sysctl to
     tune the max-stack for all events for which callchains were
     requested:

       $ sysctl kernel.perf_event_max_stack
       kernel.perf_event_max_stack = 127

     Now this patch introduces a way to configure this per event, i.e.
     this becomes possible:

       $ perf record -e sched:*/max-stack=2/ -e block:*/max-stack=10/ -a

     allowing finer tuning of how much buffer space callchains use.

     This uses an u16 from the reserved space at the end, leaving
     another u16 for future use.

     There has been interest in even finer tuning, namely to control
     the max stack for kernel and userspace callchains separately.
     Further discussion is needed; we may for instance use the
     remaining u16 for that and, when it is present, assume that the
     sample_max_stack introduced in this patch applies to the kernel,
     while the remaining u16 limits the userspace callchain (Arnaldo
     Carvalho de Melo)

   - Optimize AUX event (hardware assisted side-band event) delivery
     (Kan Liang)

   - Rework Intel family name macro usage (this is partially x86 arch
     work) (Dave Hansen)

   - Refine and fix Intel LBR support (David Carrillo-Cisneros)

   - Add support for Intel 'TopDown' events (Andi Kleen)

   - Intel uncore PMU driver fixes and enhancements (Kan Liang)

   - ... other misc changes.

  Here's an incomplete list of the tooling enhancements (but there's
  much more, see the shortlog and the git log for details):

   - Support cross unwinding, i.e. collecting '--call-graph dwarf'
     perf.data files on one machine and then doing analysis on another
     machine of a different hardware architecture. This enables, for
     instance, doing:

       $ perf record -a --call-graph dwarf

     on a x86-32 or aarch64 system and then doing 'perf report' on it
     on a x86_64 workstation (He Kuang)

   - Allow reading from a backward ring buffer (one set up via
     sys_perf_event_open() with perf_event_attr.write_backward = 1)
     (Wang Nan)

   - Finish merging initial SDT (Statically Defined Traces) support,
     see cset comments for details about how it all works (Masami
     Hiramatsu)

   - Support attaching eBPF programs to tracepoints (Wang Nan)

   - Add demangling of symbols in programs written in the Rust language
     (David Tolnay)

   - Add support for tracepoints in the python binding, including an
     example that sets up and parses sched:sched_switch events
     (tools/perf/python/tracepoint.py) (Jiri Olsa)

   - Introduce --stdio-color to set up the color output mode selection
     in 'annotate' and 'report', allowing color escape sequences to be
     emitted when redirecting the output of these tools (Arnaldo
     Carvalho de Melo)

   - Add 'callindent' option to 'perf script -F', to indent the Intel
     PT call stack, making this output more ftrace-like (Adrian Hunter,
     Andi Kleen)

   - Allow dumping the object files generated by llvm when processing
     eBPF scriptlet events (Wang Nan)

   - Add stackcollapse.py script to help generating flame graphs (Paolo
     Bonzini)

   - Add --ldlat option to 'perf mem' to specify load latency for loads
     event (e.g. cpu/mem-loads/) (Jiri Olsa)

   - Tooling support for Intel TopDown counters, recently added to the
     kernel (Andi Kleen)"

* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (303 commits)
  perf tests: Add is_printable_array test
  perf tools: Make is_printable_array global
  perf script python: Fix string vs byte array resolving
  perf probe: Warn unmatched function filter correctly
  perf cpu_map: Add more helpers
  perf stat: Balance opening and reading events
  tools: Copy linux/{hash,poison}.h and check for drift
  perf tools: Remove include/linux/list.h from perf's MANIFEST
  tools: Copy the bitops files accessed from the kernel and check for drift
  Remove: kernel unistd*h files from perf's MANIFEST, not used
  perf tools: Remove tools/perf/util/include/linux/const.h
  perf tools: Remove tools/perf/util/include/asm/byteorder.h
  perf tools: Add missing linux/compiler.h include to perf-sys.h
  perf jit: Remove some no-op error handling
  perf jit: Add missing curly braces
  objtool: Initialize variable to silence old compiler
  objtool: Add -I$(srctree)/tools/arch/$(ARCH)/include/uapi
  perf record: Add --tail-synthesize option
  perf session: Don't warn about out of order event if write_backward is used
  perf tools: Enable overwrite settings
  ...
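As a concrete illustration of the two new perf_event_attr knobs called out above (the per-event sample_max_stack limit and write_backward), here is a minimal, hypothetical C sketch of a raw sys_perf_event_open() caller; the event choice and all values are placeholders, and error handling is omitted:

	/* Hypothetical sketch: open one software event with a per-event
	 * callchain limit (sample_max_stack, the new u16) and a backward-
	 * writing ring buffer (write_backward). Placeholder values only. */
	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <string.h>
	#include <unistd.h>

	static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
				   int cpu, int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_event_open, attr, pid, cpu,
			       group_fd, flags);
	}

	int open_sketch_event(void)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size		= sizeof(attr);
		attr.type		= PERF_TYPE_SOFTWARE;
		attr.config		= PERF_COUNT_SW_DUMMY;
		attr.sample_type	= PERF_SAMPLE_CALLCHAIN;
		attr.sample_period	= 1;
		attr.sample_max_stack	= 2;	/* per-event max-stack */
		attr.write_backward	= 1;	/* ring buffer written backward */

		return perf_event_open(&attr, 0, -1, -1, 0); /* this thread, any CPU */
	}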
commit 7e4dc77b28
324 changed files with 14003 additions and 2555 deletions

Makefile | 2 +-
@@ -1040,7 +1040,7 @@ ifdef CONFIG_STACK_VALIDATION
   ifeq ($(has_libelf),1)
     objtool_target := tools/objtool FORCE
   else
-    $(warning "Cannot use CONFIG_STACK_VALIDATION, please install libelf-dev or elfutils-libelf-devel")
+    $(warning "Cannot use CONFIG_STACK_VALIDATION, please install libelf-dev, libelf-devel or elfutils-libelf-devel")
     SKIP_STACK_VALIDATION := 1
     export SKIP_STACK_VALIDATION
   endif
@@ -139,8 +139,8 @@ struct kvm_arch_memory_slot {
 #define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)

 #define KVM_REG_ARM_TIMER_CTL	ARM_CP15_REG32(0, 14, 3, 1)
-#define KVM_REG_ARM_TIMER_CNT	ARM_CP15_REG64(1, 14)
-#define KVM_REG_ARM_TIMER_CVAL	ARM_CP15_REG64(3, 14)
+#define KVM_REG_ARM_TIMER_CNT	ARM_CP15_REG64(1, 14)
+#define KVM_REG_ARM_TIMER_CVAL	ARM_CP15_REG64(3, 14)

 /* Normal registers are mapped as coprocessor 16. */
 #define KVM_REG_ARM_CORE	(0x0010 << KVM_REG_ARM_COPROC_SHIFT)
@@ -1622,6 +1622,29 @@ ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, cha
 }
 EXPORT_SYMBOL_GPL(events_sysfs_show);

+ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
+			  char *page)
+{
+	struct perf_pmu_events_ht_attr *pmu_attr =
+		container_of(attr, struct perf_pmu_events_ht_attr, attr);
+
+	/*
+	 * Report conditional events depending on Hyper-Threading.
+	 *
+	 * This is overly conservative as usually the HT special
+	 * handling is not needed if the other CPU thread is idle.
+	 *
+	 * Note this does not (and cannot) handle the case when thread
+	 * siblings are invisible, for example with virtualization
+	 * if they are owned by some other guest. The user tool
+	 * has to re-read when a thread sibling gets onlined later.
+	 */
+	return sprintf(page, "%s",
+			topology_max_smt_threads() > 1 ?
+			pmu_attr->event_str_ht :
+			pmu_attr->event_str_noht);
+}
+
 EVENT_ATTR(cpu-cycles,			CPU_CYCLES		);
 EVENT_ATTR(instructions,		INSTRUCTIONS		);
 EVENT_ATTR(cache-references,		CACHE_REFERENCES	);
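A usage note on the new events_ht_sysfs_show() above: it is wired up through the EVENT_ATTR_STR_HT() macro added to perf_event.h later in this diff, so each such attribute carries two event strings and the one exposed in sysfs depends on the machine's SMT state at read time. Illustrative (not captured) shell output:

	$ cat /sys/bus/event_source/devices/cpu/events/topdown-total-slots
	event=0x3c,umask=0x0,any=1

With SMT enabled, topology_max_smt_threads() > 1 and the event_str_ht variant (here with any=1) is returned; with SMT off, the event_str_noht variant would be printed instead.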
@@ -16,6 +16,7 @@

 #include <asm/cpufeature.h>
 #include <asm/hardirq.h>
+#include <asm/intel-family.h>
 #include <asm/apic.h>

 #include "../perf_event.h"

@@ -185,7 +186,7 @@ static struct event_constraint intel_slm_event_constraints[] __read_mostly =
	EVENT_CONSTRAINT_END
 };

-struct event_constraint intel_skl_event_constraints[] = {
+static struct event_constraint intel_skl_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */

@@ -204,10 +205,8 @@ struct event_constraint intel_skl_event_constraints[] = {
 };

 static struct extra_reg intel_knl_extra_regs[] __read_mostly = {
-	INTEL_UEVENT_EXTRA_REG(0x01b7,
-			       MSR_OFFCORE_RSP_0, 0x7f9ffbffffull, RSP_0),
-	INTEL_UEVENT_EXTRA_REG(0x02b7,
-			       MSR_OFFCORE_RSP_1, 0x3f9ffbffffull, RSP_1),
+	INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x799ffbb6e7ull, RSP_0),
+	INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x399ffbffe7ull, RSP_1),
	EVENT_EXTRA_END
 };

@@ -243,14 +242,51 @@ EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
 EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
 EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");

-struct attribute *nhm_events_attrs[] = {
+static struct attribute *nhm_events_attrs[] = {
	EVENT_PTR(mem_ld_nhm),
	NULL,
 };

-struct attribute *snb_events_attrs[] = {
+/*
+ * topdown events for Intel Core CPUs.
+ *
+ * The events are all in slots, which is a free slot in a 4 wide
+ * pipeline. Some events are already reported in slots, for cycle
+ * events we multiply by the pipeline width (4).
+ *
+ * With Hyper Threading on, topdown metrics are either summed or averaged
+ * between the threads of a core: (count_t0 + count_t1).
+ *
+ * For the average case the metric is always scaled to pipeline width,
+ * so we use factor 2 ((count_t0 + count_t1) / 2 * 4)
+ */
+
+EVENT_ATTR_STR_HT(topdown-total-slots, td_total_slots,
+	"event=0x3c,umask=0x0",			/* cpu_clk_unhalted.thread */
+	"event=0x3c,umask=0x0,any=1");		/* cpu_clk_unhalted.thread_any */
+EVENT_ATTR_STR_HT(topdown-total-slots.scale, td_total_slots_scale, "4", "2");
+EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued,
+	"event=0xe,umask=0x1");			/* uops_issued.any */
+EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired,
+	"event=0xc2,umask=0x2");		/* uops_retired.retire_slots */
+EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles,
+	"event=0x9c,umask=0x1");		/* idq_uops_not_delivered_core */
+EVENT_ATTR_STR_HT(topdown-recovery-bubbles, td_recovery_bubbles,
+	"event=0xd,umask=0x3,cmask=1",		/* int_misc.recovery_cycles */
+	"event=0xd,umask=0x3,cmask=1,any=1");	/* int_misc.recovery_cycles_any */
+EVENT_ATTR_STR_HT(topdown-recovery-bubbles.scale, td_recovery_bubbles_scale,
+	"4", "2");
+
+static struct attribute *snb_events_attrs[] = {
	EVENT_PTR(mem_ld_snb),
	EVENT_PTR(mem_st_snb),
+	EVENT_PTR(td_slots_issued),
+	EVENT_PTR(td_slots_retired),
+	EVENT_PTR(td_fetch_bubbles),
+	EVENT_PTR(td_total_slots),
+	EVENT_PTR(td_total_slots_scale),
+	EVENT_PTR(td_recovery_bubbles),
+	EVENT_PTR(td_recovery_bubbles_scale),
	NULL,
 };
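To make the scaling comment above concrete, a worked example with made-up counts: with Hyper-Threading on, if thread 0 of a core counts 300 unhalted cycles and thread 1 counts 500, the summed raw count is 800. The metric is averaged between the two threads and then scaled to the 4-wide pipeline, so total slots = (300 + 500) / 2 * 4 = 1600 - which is exactly what the td_total_slots_scale attribute expresses: the raw sum is multiplied by "2" in the HT case (800 * 2 = 1600) and by "4" in the non-HT case.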
@@ -280,7 +316,7 @@ static struct event_constraint intel_hsw_event_constraints[] = {
	EVENT_CONSTRAINT_END
 };

-struct event_constraint intel_bdw_event_constraints[] = {
+static struct event_constraint intel_bdw_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0),	/* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1),	/* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2),	/* CPU_CLK_UNHALTED.REF */

@@ -1361,6 +1397,29 @@ static __initconst const u64 atom_hw_cache_event_ids
	},
 };

+EVENT_ATTR_STR(topdown-total-slots, td_total_slots_slm, "event=0x3c");
+EVENT_ATTR_STR(topdown-total-slots.scale, td_total_slots_scale_slm, "2");
+/* no_alloc_cycles.not_delivered */
+EVENT_ATTR_STR(topdown-fetch-bubbles, td_fetch_bubbles_slm,
+	       "event=0xca,umask=0x50");
+EVENT_ATTR_STR(topdown-fetch-bubbles.scale, td_fetch_bubbles_scale_slm, "2");
+/* uops_retired.all */
+EVENT_ATTR_STR(topdown-slots-issued, td_slots_issued_slm,
+	       "event=0xc2,umask=0x10");
+/* uops_retired.all */
+EVENT_ATTR_STR(topdown-slots-retired, td_slots_retired_slm,
+	       "event=0xc2,umask=0x10");
+
+static struct attribute *slm_events_attrs[] = {
+	EVENT_PTR(td_total_slots_slm),
+	EVENT_PTR(td_total_slots_scale_slm),
+	EVENT_PTR(td_fetch_bubbles_slm),
+	EVENT_PTR(td_fetch_bubbles_scale_slm),
+	EVENT_PTR(td_slots_issued_slm),
+	EVENT_PTR(td_slots_retired_slm),
+	NULL
+};
+
 static struct extra_reg intel_slm_extra_regs[] __read_mostly =
 {
	/* must define OFFCORE_RSP_X first, see intel_fixup_er() */

@@ -3290,11 +3349,11 @@ static int intel_snb_pebs_broken(int cpu)
	u32 rev = UINT_MAX; /* default to broken for unknown models */

	switch (cpu_data(cpu).x86_model) {
-	case 42: /* SNB */
+	case INTEL_FAM6_SANDYBRIDGE:
		rev = 0x28;
		break;

-	case 45: /* SNB-EP */
+	case INTEL_FAM6_SANDYBRIDGE_X:
		switch (cpu_data(cpu).x86_mask) {
		case 6: rev = 0x618; break;
		case 7: rev = 0x70c; break;

@@ -3331,6 +3390,13 @@ static void intel_snb_check_microcode(void)
	}
 }

+static bool is_lbr_from(unsigned long msr)
+{
+	unsigned long lbr_from_nr = x86_pmu.lbr_from + x86_pmu.lbr_nr;
+
+	return x86_pmu.lbr_from <= msr && msr < lbr_from_nr;
+}
+
 /*
  * Under certain circumstances, access certain MSR may cause #GP.
  * The function tests if the input MSR can be safely accessed.

@@ -3351,13 +3417,24 @@ static bool check_msr(unsigned long msr, u64 mask)
	 * Only change the bits which can be updated by wrmsrl.
	 */
	val_tmp = val_old ^ mask;
+
+	if (is_lbr_from(msr))
+		val_tmp = lbr_from_signext_quirk_wr(val_tmp);
+
	if (wrmsrl_safe(msr, val_tmp) ||
	    rdmsrl_safe(msr, &val_new))
		return false;

+	/*
+	 * Quirk only affects validation in wrmsr(), so wrmsrl()'s value
+	 * should equal rdmsrl()'s even with the quirk.
+	 */
	if (val_new != val_tmp)
		return false;

+	if (is_lbr_from(msr))
+		val_old = lbr_from_signext_quirk_wr(val_old);
+
	/* Here it's sure that the MSR can be safely accessed.
	 * Restore the old value and return.
	 */

@@ -3466,6 +3543,13 @@ static struct attribute *hsw_events_attrs[] = {
	EVENT_PTR(cycles_ct),
	EVENT_PTR(mem_ld_hsw),
	EVENT_PTR(mem_st_hsw),
+	EVENT_PTR(td_slots_issued),
+	EVENT_PTR(td_slots_retired),
+	EVENT_PTR(td_fetch_bubbles),
+	EVENT_PTR(td_total_slots),
+	EVENT_PTR(td_total_slots_scale),
+	EVENT_PTR(td_recovery_bubbles),
+	EVENT_PTR(td_recovery_bubbles_scale),
	NULL
 };

@@ -3537,15 +3621,15 @@ __init int intel_pmu_init(void)
	 * Install the hw-cache-events table:
	 */
	switch (boot_cpu_data.x86_model) {
-	case 14: /* 65nm Core "Yonah" */
+	case INTEL_FAM6_CORE_YONAH:
		pr_cont("Core events, ");
		break;

-	case 15: /* 65nm Core2 "Merom" */
+	case INTEL_FAM6_CORE2_MEROM:
		x86_add_quirk(intel_clovertown_quirk);
-	case 22: /* 65nm Core2 "Merom-L" */
-	case 23: /* 45nm Core2 "Penryn" */
-	case 29: /* 45nm Core2 "Dunnington (MP) */
+	case INTEL_FAM6_CORE2_MEROM_L:
+	case INTEL_FAM6_CORE2_PENRYN:
+	case INTEL_FAM6_CORE2_DUNNINGTON:
		memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

@@ -3556,9 +3640,9 @@ __init int intel_pmu_init(void)
		pr_cont("Core2 events, ");
		break;

-	case 30: /* 45nm Nehalem    */
-	case 26: /* 45nm Nehalem-EP */
-	case 46: /* 45nm Nehalem-EX */
+	case INTEL_FAM6_NEHALEM:
+	case INTEL_FAM6_NEHALEM_EP:
+	case INTEL_FAM6_NEHALEM_EX:
		memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,

@@ -3586,11 +3670,11 @@ __init int intel_pmu_init(void)
		pr_cont("Nehalem events, ");
		break;

-	case 28: /* 45nm Atom "Pineview"   */
-	case 38: /* 45nm Atom "Lincroft"   */
-	case 39: /* 32nm Atom "Penwell"    */
-	case 53: /* 32nm Atom "Cloverview" */
-	case 54: /* 32nm Atom "Cedarview"  */
+	case INTEL_FAM6_ATOM_PINEVIEW:
+	case INTEL_FAM6_ATOM_LINCROFT:
+	case INTEL_FAM6_ATOM_PENWELL:
+	case INTEL_FAM6_ATOM_CLOVERVIEW:
+	case INTEL_FAM6_ATOM_CEDARVIEW:
		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

@@ -3602,9 +3686,9 @@ __init int intel_pmu_init(void)
		pr_cont("Atom events, ");
		break;

-	case 55: /* 22nm Atom "Silvermont"                */
-	case 76: /* 14nm Atom "Airmont"                   */
-	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
+	case INTEL_FAM6_ATOM_SILVERMONT1:
+	case INTEL_FAM6_ATOM_SILVERMONT2:
+	case INTEL_FAM6_ATOM_AIRMONT:
		memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,

@@ -3616,11 +3700,12 @@ __init int intel_pmu_init(void)
		x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints;
		x86_pmu.extra_regs = intel_slm_extra_regs;
		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
+		x86_pmu.cpu_events = slm_events_attrs;
		pr_cont("Silvermont events, ");
		break;

-	case 92: /* 14nm Atom "Goldmont" */
-	case 95: /* 14nm Atom "Goldmont Denverton" */
+	case INTEL_FAM6_ATOM_GOLDMONT:
+	case INTEL_FAM6_ATOM_DENVERTON:
		memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,

@@ -3643,9 +3728,9 @@ __init int intel_pmu_init(void)
		pr_cont("Goldmont events, ");
		break;

-	case 37: /* 32nm Westmere    */
-	case 44: /* 32nm Westmere-EP */
-	case 47: /* 32nm Westmere-EX */
+	case INTEL_FAM6_WESTMERE:
+	case INTEL_FAM6_WESTMERE_EP:
+	case INTEL_FAM6_WESTMERE_EX:
		memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,

@@ -3672,8 +3757,8 @@ __init int intel_pmu_init(void)
		pr_cont("Westmere events, ");
		break;

-	case 42: /* 32nm SandyBridge         */
-	case 45: /* 32nm SandyBridge-E/EN/EP */
+	case INTEL_FAM6_SANDYBRIDGE:
+	case INTEL_FAM6_SANDYBRIDGE_X:
		x86_add_quirk(intel_sandybridge_quirk);
		x86_add_quirk(intel_ht_bug);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,

@@ -3686,7 +3771,7 @@ __init int intel_pmu_init(void)
		x86_pmu.event_constraints = intel_snb_event_constraints;
		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
-		if (boot_cpu_data.x86_model == 45)
+		if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;

@@ -3708,8 +3793,8 @@ __init int intel_pmu_init(void)
		pr_cont("SandyBridge events, ");
		break;

-	case 58: /* 22nm IvyBridge       */
-	case 62: /* 22nm IvyBridge-EP/EX */
+	case INTEL_FAM6_IVYBRIDGE:
+	case INTEL_FAM6_IVYBRIDGE_X:
		x86_add_quirk(intel_ht_bug);
		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
		       sizeof(hw_cache_event_ids));

@@ -3725,7 +3810,7 @@ __init int intel_pmu_init(void)
		x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
		x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
		x86_pmu.pebs_prec_dist = true;
-		if (boot_cpu_data.x86_model == 62)
+		if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
			x86_pmu.extra_regs = intel_snbep_extra_regs;
		else
			x86_pmu.extra_regs = intel_snb_extra_regs;

@@ -3743,10 +3828,10 @@ __init int intel_pmu_init(void)
		break;


-	case 60: /* 22nm Haswell Core */
-	case 63: /* 22nm Haswell Server */
-	case 69: /* 22nm Haswell ULT */
-	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
+	case INTEL_FAM6_HASWELL_CORE:
+	case INTEL_FAM6_HASWELL_X:
+	case INTEL_FAM6_HASWELL_ULT:
+	case INTEL_FAM6_HASWELL_GT3E:
		x86_add_quirk(intel_ht_bug);
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));

@@ -3770,10 +3855,10 @@ __init int intel_pmu_init(void)
		pr_cont("Haswell events, ");
		break;

-	case 61: /* 14nm Broadwell Core-M */
-	case 86: /* 14nm Broadwell Xeon D */
-	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
-	case 79: /* 14nm Broadwell Server */
+	case INTEL_FAM6_BROADWELL_CORE:
+	case INTEL_FAM6_BROADWELL_XEON_D:
+	case INTEL_FAM6_BROADWELL_GT3E:
+	case INTEL_FAM6_BROADWELL_X:
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));

@@ -3806,7 +3891,7 @@ __init int intel_pmu_init(void)
		pr_cont("Broadwell events, ");
		break;

-	case 87: /* Knights Landing Xeon Phi */
+	case INTEL_FAM6_XEON_PHI_KNL:
		memcpy(hw_cache_event_ids,
		       slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs,

@@ -3824,16 +3909,22 @@ __init int intel_pmu_init(void)
		pr_cont("Knights Landing events, ");
		break;

-	case 142: /* 14nm Kabylake Mobile */
-	case 158: /* 14nm Kabylake Desktop */
-	case 78: /* 14nm Skylake Mobile */
-	case 94: /* 14nm Skylake Desktop */
-	case 85: /* 14nm Skylake Server */
+	case INTEL_FAM6_SKYLAKE_MOBILE:
+	case INTEL_FAM6_SKYLAKE_DESKTOP:
+	case INTEL_FAM6_SKYLAKE_X:
+	case INTEL_FAM6_KABYLAKE_MOBILE:
+	case INTEL_FAM6_KABYLAKE_DESKTOP:
		x86_pmu.late_ack = true;
		memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
		memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
		intel_pmu_lbr_init_skl();

+		/* INT_MISC.RECOVERY_CYCLES has umask 1 in Skylake */
+		event_attr_td_recovery_bubbles.event_str_noht =
+			"event=0xd,umask=0x1,cmask=1";
+		event_attr_td_recovery_bubbles.event_str_ht =
+			"event=0xd,umask=0x1,cmask=1,any=1";
+
		x86_pmu.event_constraints = intel_skl_event_constraints;
		x86_pmu.pebs_constraints = intel_skl_pebs_event_constraints;
		x86_pmu.extra_regs = intel_skl_extra_regs;

@@ -3914,6 +4005,8 @@ __init int intel_pmu_init(void)
		x86_pmu.lbr_nr = 0;
	}

+	if (x86_pmu.lbr_nr)
+		pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr);
	/*
	 * Access extra MSR may cause #GP under certain circumstances.
	 * E.g. KVM doesn't support offcore event

@@ -3946,16 +4039,14 @@ __init int intel_pmu_init(void)
 */
static __init int fixup_ht_bug(void)
{
-	int cpu = smp_processor_id();
-	int w, c;
+	int c;
	/*
	 * problem not present on this CPU model, nothing to do
	 */
	if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
		return 0;

-	w = cpumask_weight(topology_sibling_cpumask(cpu));
-	if (w > 1) {
+	if (topology_max_smt_threads() > 1) {
		pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
		return 0;
	}
@@ -89,6 +89,7 @@
 #include <linux/slab.h>
 #include <linux/perf_event.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include "../perf_event.h"

 MODULE_LICENSE("GPL");

@@ -511,37 +512,37 @@ static const struct cstate_model slm_cstates __initconst = {
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long) &(states) }

 static const struct x86_cpu_id intel_cstates_match[] __initconst = {
-	X86_CSTATES_MODEL(30, nhm_cstates),    /* 45nm Nehalem              */
-	X86_CSTATES_MODEL(26, nhm_cstates),    /* 45nm Nehalem-EP           */
-	X86_CSTATES_MODEL(46, nhm_cstates),    /* 45nm Nehalem-EX           */
+	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM,    nhm_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EP, nhm_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_NEHALEM_EX, nhm_cstates),

-	X86_CSTATES_MODEL(37, nhm_cstates),    /* 32nm Westmere             */
-	X86_CSTATES_MODEL(44, nhm_cstates),    /* 32nm Westmere-EP          */
-	X86_CSTATES_MODEL(47, nhm_cstates),    /* 32nm Westmere-EX          */
+	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE,    nhm_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EP, nhm_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_WESTMERE_EX, nhm_cstates),

-	X86_CSTATES_MODEL(42, snb_cstates),    /* 32nm SandyBridge          */
-	X86_CSTATES_MODEL(45, snb_cstates),    /* 32nm SandyBridge-E/EN/EP  */
+	X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE,   snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_SANDYBRIDGE_X, snb_cstates),

-	X86_CSTATES_MODEL(58, snb_cstates),    /* 22nm IvyBridge            */
-	X86_CSTATES_MODEL(62, snb_cstates),    /* 22nm IvyBridge-EP/EX      */
+	X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE,   snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_IVYBRIDGE_X, snb_cstates),

-	X86_CSTATES_MODEL(60, snb_cstates),    /* 22nm Haswell Core         */
-	X86_CSTATES_MODEL(63, snb_cstates),    /* 22nm Haswell Server       */
-	X86_CSTATES_MODEL(70, snb_cstates),    /* 22nm Haswell + GT3e       */
+	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_CORE, snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_X,    snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_GT3E, snb_cstates),

-	X86_CSTATES_MODEL(69, hswult_cstates), /* 22nm Haswell ULT          */
+	X86_CSTATES_MODEL(INTEL_FAM6_HASWELL_ULT, hswult_cstates),

-	X86_CSTATES_MODEL(55, slm_cstates),    /* 22nm Atom Silvermont      */
-	X86_CSTATES_MODEL(77, slm_cstates),    /* 22nm Atom Avoton/Rangely  */
-	X86_CSTATES_MODEL(76, slm_cstates),    /* 22nm Atom Airmont         */
+	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT1, slm_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_SILVERMONT2, slm_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_ATOM_AIRMONT,     slm_cstates),

-	X86_CSTATES_MODEL(61, snb_cstates),    /* 14nm Broadwell Core-M     */
-	X86_CSTATES_MODEL(86, snb_cstates),    /* 14nm Broadwell Xeon D     */
-	X86_CSTATES_MODEL(71, snb_cstates),    /* 14nm Broadwell + GT3e     */
-	X86_CSTATES_MODEL(79, snb_cstates),    /* 14nm Broadwell Server     */
+	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_CORE,   snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_XEON_D, snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_GT3E,   snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_BROADWELL_X,      snb_cstates),

-	X86_CSTATES_MODEL(78, snb_cstates),    /* 14nm Skylake Mobile       */
-	X86_CSTATES_MODEL(94, snb_cstates),    /* 14nm Skylake Desktop      */
+	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_MOBILE,  snb_cstates),
+	X86_CSTATES_MODEL(INTEL_FAM6_SKYLAKE_DESKTOP, snb_cstates),
	{ },
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
@@ -77,9 +77,11 @@ static enum {
	 LBR_IND_JMP	|\
	 LBR_FAR)

-#define LBR_FROM_FLAG_MISPRED  (1ULL << 63)
-#define LBR_FROM_FLAG_IN_TX    (1ULL << 62)
-#define LBR_FROM_FLAG_ABORT    (1ULL << 61)
+#define LBR_FROM_FLAG_MISPRED	BIT_ULL(63)
+#define LBR_FROM_FLAG_IN_TX	BIT_ULL(62)
+#define LBR_FROM_FLAG_ABORT	BIT_ULL(61)
+
+#define LBR_FROM_SIGNEXT_2MSB	(BIT_ULL(60) | BIT_ULL(59))

 /*
  * x86control flow change classification

@@ -235,6 +237,97 @@ enum {
	LBR_VALID,
 };

+/*
+ * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in
+ * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when
+ * TSX is not supported they have no consistent behavior:
+ *
+ *   - For wrmsr(), bits 61:62 are considered part of the sign extension.
+ *   - For HW updates (branch captures) bits 61:62 are always OFF and are not
+ *     part of the sign extension.
+ *
+ * Therefore, if:
+ *
+ *   1) LBR has TSX format
+ *   2) CPU has no TSX support enabled
+ *
+ * ... then any value passed to wrmsr() must be sign extended to 63 bits and any
+ * value from rdmsr() must be converted to have a 61 bits sign extension,
+ * ignoring the TSX flags.
+ */
+static inline bool lbr_from_signext_quirk_needed(void)
+{
+	int lbr_format = x86_pmu.intel_cap.lbr_format;
+	bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) ||
+			   boot_cpu_has(X86_FEATURE_RTM);
+
+	return !tsx_support && (lbr_desc[lbr_format] & LBR_TSX);
+}
+
+DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key);
+
+/* If quirk is enabled, ensure sign extension is 63 bits: */
+inline u64 lbr_from_signext_quirk_wr(u64 val)
+{
+	if (static_branch_unlikely(&lbr_from_quirk_key)) {
+		/*
+		 * Sign extend into bits 61:62 while preserving bit 63.
+		 *
+		 * Quirk is enabled when TSX is disabled. Therefore TSX
+		 * bits in val are always OFF and must be changed to be
+		 * sign extension bits. Since bits 59:60 are guaranteed
+		 * to be part of the sign extension bits, we can just
+		 * copy them to 61:62.
+		 */
+		val |= (LBR_FROM_SIGNEXT_2MSB & val) << 2;
+	}
+	return val;
+}
+
+/*
+ * If quirk is needed, ensure sign extension is 61 bits:
+ */
+u64 lbr_from_signext_quirk_rd(u64 val)
+{
+	if (static_branch_unlikely(&lbr_from_quirk_key)) {
+		/*
+		 * Quirk is on when TSX is not enabled. Therefore TSX
+		 * flags must be read as OFF.
+		 */
+		val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
+	}
+	return val;
+}
+
+static inline void wrlbr_from(unsigned int idx, u64 val)
+{
+	val = lbr_from_signext_quirk_wr(val);
+	wrmsrl(x86_pmu.lbr_from + idx, val);
+}
+
+static inline void wrlbr_to(unsigned int idx, u64 val)
+{
+	wrmsrl(x86_pmu.lbr_to + idx, val);
+}
+
+static inline u64 rdlbr_from(unsigned int idx)
+{
+	u64 val;
+
+	rdmsrl(x86_pmu.lbr_from + idx, val);
+
+	return lbr_from_signext_quirk_rd(val);
+}
+
+static inline u64 rdlbr_to(unsigned int idx)
+{
+	u64 val;
+
+	rdmsrl(x86_pmu.lbr_to + idx, val);
+
+	return val;
+}
+
 static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 {
	int i;
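A worked example of the write-side fixup above, with an illustrative value (not from the source): take an LBR_FROM value whose true address is negative (bit 63 set) but whose TSX bits 62:61 read as OFF, while the genuine sign bits 60:59 are set:

	val                         = 0x9800000000001234  (bits 63, 60, 59 set)
	LBR_FROM_SIGNEXT_2MSB & val = 0x1800000000000000  (keep bits 60:59)
	(...) << 2                  = 0x6000000000000000  (copies land in 62:61)
	val |= ...                  = 0xf800000000001234  (bits 63:59 all sign bits)

After the OR, the value is sign extended all the way through bit 63 and can be written with wrmsr() without triggering a #GP; the read-side helper performs the inverse by masking bits 62:61 back off.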
@@ -251,8 +344,9 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
	tos = task_ctx->tos;
	for (i = 0; i < tos; i++) {
		lbr_idx = (tos - i) & mask;
-		wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
-		wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
+		wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
+		wrlbr_to  (lbr_idx, task_ctx->lbr_to[i]);
+
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}

@@ -262,9 +356,9 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)

 static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 {
-	int i;
	unsigned lbr_idx, mask;
	u64 tos;
+	int i;

	if (task_ctx->lbr_callstack_users == 0) {
		task_ctx->lbr_stack_state = LBR_NONE;

@@ -275,8 +369,8 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
	tos = intel_pmu_lbr_tos();
	for (i = 0; i < tos; i++) {
		lbr_idx = (tos - i) & mask;
-		rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
-		rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
+		task_ctx->lbr_from[i] = rdlbr_from(lbr_idx);
+		task_ctx->lbr_to[i]   = rdlbr_to(lbr_idx);
		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
	}

@@ -452,8 +546,8 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
		u16 cycles = 0;
		int lbr_flags = lbr_desc[lbr_format];

-		rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
-		rdmsrl(x86_pmu.lbr_to   + lbr_idx, to);
+		from = rdlbr_from(lbr_idx);
+		to   = rdlbr_to(lbr_idx);

		if (lbr_format == LBR_FORMAT_INFO && need_info) {
			u64 info;

@@ -956,7 +1050,6 @@ void __init intel_pmu_lbr_init_core(void)
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
-	pr_cont("4-deep LBR, ");
 }

 /* nehalem/westmere */

@@ -977,7 +1070,6 @@ void __init intel_pmu_lbr_init_nhm(void)
	 * That requires LBR_FAR but that means far
	 * jmp need to be filtered out
	 */
-	pr_cont("16-deep LBR, ");
 }

 /* sandy bridge */

@@ -997,7 +1089,6 @@ void __init intel_pmu_lbr_init_snb(void)
	 * That requires LBR_FAR but that means far
	 * jmp need to be filtered out
	 */
-	pr_cont("16-deep LBR, ");
 }

 /* haswell */

@@ -1011,7 +1102,8 @@ void intel_pmu_lbr_init_hsw(void)
	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = hsw_lbr_sel_map;

-	pr_cont("16-deep LBR, ");
+	if (lbr_from_signext_quirk_needed())
+		static_branch_enable(&lbr_from_quirk_key);
 }

 /* skylake */

@@ -1031,7 +1123,6 @@ __init void intel_pmu_lbr_init_skl(void)
	 * That requires LBR_FAR but that means far
	 * jmp need to be filtered out
	 */
-	pr_cont("32-deep LBR, ");
 }

 /* atom */

@@ -1057,7 +1148,6 @@ void __init intel_pmu_lbr_init_atom(void)
	 * SW branch filter usage:
	 * - compensate for lack of HW filter
	 */
-	pr_cont("8-deep LBR, ");
 }

 /* slm */

@@ -1088,6 +1178,4 @@ void intel_pmu_lbr_init_knl(void)

	x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
	x86_pmu.lbr_sel_map  = snb_lbr_sel_map;
-
-	pr_cont("8-deep LBR, ");
 }
@@ -55,6 +55,7 @@
 #include <linux/slab.h>
 #include <linux/perf_event.h>
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include "../perf_event.h"

 MODULE_LICENSE("GPL");

@@ -786,26 +787,27 @@ static const struct intel_rapl_init_fun skl_rapl_init __initconst = {
 };

 static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
-	X86_RAPL_MODEL_MATCH(42, snb_rapl_init),	/* Sandy Bridge */
-	X86_RAPL_MODEL_MATCH(45, snbep_rapl_init),	/* Sandy Bridge-EP */
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,   snb_rapl_init),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X, snbep_rapl_init),

-	X86_RAPL_MODEL_MATCH(58, snb_rapl_init),	/* Ivy Bridge */
-	X86_RAPL_MODEL_MATCH(62, snbep_rapl_init),	/* IvyTown */
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,   snb_rapl_init),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X, snbep_rapl_init),

-	X86_RAPL_MODEL_MATCH(60, hsw_rapl_init),	/* Haswell */
-	X86_RAPL_MODEL_MATCH(63, hsx_rapl_init),	/* Haswell-Server */
-	X86_RAPL_MODEL_MATCH(69, hsw_rapl_init),	/* Haswell-Celeron */
-	X86_RAPL_MODEL_MATCH(70, hsw_rapl_init),	/* Haswell GT3e */
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE, hsw_rapl_init),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_X,    hsw_rapl_init),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,  hsw_rapl_init),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E, hsw_rapl_init),

-	X86_RAPL_MODEL_MATCH(61, hsw_rapl_init),	/* Broadwell */
-	X86_RAPL_MODEL_MATCH(71, hsw_rapl_init),	/* Broadwell-H */
-	X86_RAPL_MODEL_MATCH(79, hsx_rapl_init),	/* Broadwell-Server */
-	X86_RAPL_MODEL_MATCH(86, hsx_rapl_init),	/* Broadwell Xeon D */
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE,   hsw_rapl_init),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E,   hsw_rapl_init),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,      hsw_rapl_init),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, hsw_rapl_init),

-	X86_RAPL_MODEL_MATCH(87, knl_rapl_init),	/* Knights Landing */
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL, knl_rapl_init),

-	X86_RAPL_MODEL_MATCH(78, skl_rapl_init),	/* Skylake */
-	X86_RAPL_MODEL_MATCH(94, skl_rapl_init),	/* Skylake H/S */
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE,  skl_rapl_init),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP, skl_rapl_init),
+	X86_RAPL_MODEL_MATCH(INTEL_FAM6_SKYLAKE_X,	 hsx_rapl_init),
	{},
 };
@@ -1,4 +1,5 @@
 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include "uncore.h"

 static struct intel_uncore_type *empty_uncore[] = { NULL, };

@@ -882,7 +883,7 @@ uncore_types_init(struct intel_uncore_type **types, bool setid)
 static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
	struct intel_uncore_type *type;
-	struct intel_uncore_pmu *pmu;
+	struct intel_uncore_pmu *pmu = NULL;
	struct intel_uncore_box *box;
	int phys_id, pkg, ret;

@@ -903,20 +904,37 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
	}

	type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)];
-	/*
-	 * for performance monitoring unit with multiple boxes,
-	 * each box has a different function id.
-	 */
-	pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
-	/* Knights Landing uses a common PCI device ID for multiple instances of
-	 * an uncore PMU device type. There is only one entry per device type in
-	 * the knl_uncore_pci_ids table inspite of multiple devices present for
-	 * some device types. Hence PCI device idx would be 0 for all devices.
-	 * So increment pmu pointer to point to an unused array element.
-	 */
-	if (boot_cpu_data.x86_model == 87) {
-		while (pmu->func_id >= 0)
-			pmu++;
+
+	/*
+	 * Some platforms, e.g. Knights Landing, use a common PCI device ID
+	 * for multiple instances of an uncore PMU device type. We should check
+	 * PCI slot and func to indicate the uncore box.
+	 */
+	if (id->driver_data & ~0xffff) {
+		struct pci_driver *pci_drv = pdev->driver;
+		const struct pci_device_id *ids = pci_drv->id_table;
+		unsigned int devfn;
+
+		while (ids && ids->vendor) {
+			if ((ids->vendor == pdev->vendor) &&
+			    (ids->device == pdev->device)) {
+				devfn = PCI_DEVFN(UNCORE_PCI_DEV_DEV(ids->driver_data),
+						  UNCORE_PCI_DEV_FUNC(ids->driver_data));
+				if (devfn == pdev->devfn) {
+					pmu = &type->pmus[UNCORE_PCI_DEV_IDX(ids->driver_data)];
+					break;
+				}
+			}
+			ids++;
+		}
+		if (pmu == NULL)
+			return -ENODEV;
+	} else {
+		/*
+		 * for performance monitoring unit with multiple boxes,
+		 * each box has a different function id.
+		 */
+		pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)];
	}

	if (WARN_ON_ONCE(pmu->boxes[pkg] != NULL))

@@ -956,7 +974,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id

 static void uncore_pci_remove(struct pci_dev *pdev)
 {
-	struct intel_uncore_box *box = pci_get_drvdata(pdev);
+	struct intel_uncore_box *box;
	struct intel_uncore_pmu *pmu;
	int i, phys_id, pkg;

@@ -1361,30 +1379,32 @@ static const struct intel_uncore_init_fun knl_uncore_init __initconst = {
 };

 static const struct intel_uncore_init_fun skl_uncore_init __initconst = {
+	.cpu_init = skl_uncore_cpu_init,
	.pci_init = skl_uncore_pci_init,
 };

 static const struct x86_cpu_id intel_uncore_match[] __initconst = {
-	X86_UNCORE_MODEL_MATCH(26, nhm_uncore_init),	/* Nehalem */
-	X86_UNCORE_MODEL_MATCH(30, nhm_uncore_init),
-	X86_UNCORE_MODEL_MATCH(37, nhm_uncore_init),	/* Westmere */
-	X86_UNCORE_MODEL_MATCH(44, nhm_uncore_init),
-	X86_UNCORE_MODEL_MATCH(42, snb_uncore_init),	/* Sandy Bridge */
-	X86_UNCORE_MODEL_MATCH(58, ivb_uncore_init),	/* Ivy Bridge */
-	X86_UNCORE_MODEL_MATCH(60, hsw_uncore_init),	/* Haswell */
-	X86_UNCORE_MODEL_MATCH(69, hsw_uncore_init),	/* Haswell Celeron */
-	X86_UNCORE_MODEL_MATCH(70, hsw_uncore_init),	/* Haswell */
-	X86_UNCORE_MODEL_MATCH(61, bdw_uncore_init),	/* Broadwell */
-	X86_UNCORE_MODEL_MATCH(71, bdw_uncore_init),	/* Broadwell */
-	X86_UNCORE_MODEL_MATCH(45, snbep_uncore_init),	/* Sandy Bridge-EP */
-	X86_UNCORE_MODEL_MATCH(46, nhmex_uncore_init),	/* Nehalem-EX */
-	X86_UNCORE_MODEL_MATCH(47, nhmex_uncore_init),	/* Westmere-EX aka. Xeon E7 */
-	X86_UNCORE_MODEL_MATCH(62, ivbep_uncore_init),	/* Ivy Bridge-EP */
-	X86_UNCORE_MODEL_MATCH(63, hswep_uncore_init),	/* Haswell-EP */
-	X86_UNCORE_MODEL_MATCH(79, bdx_uncore_init),	/* BDX-EP */
-	X86_UNCORE_MODEL_MATCH(86, bdx_uncore_init),	/* BDX-DE */
-	X86_UNCORE_MODEL_MATCH(87, knl_uncore_init),	/* Knights Landing */
-	X86_UNCORE_MODEL_MATCH(94, skl_uncore_init),	/* SkyLake */
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EP,	  nhm_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM,	  nhm_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE,	  nhm_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EP,	  nhm_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE,	  snb_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE,	  ivb_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_CORE,	  hsw_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_ULT,	  hsw_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_GT3E,	  hsw_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_CORE,   bdw_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_GT3E,   bdw_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SANDYBRIDGE_X,	  snbep_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_NEHALEM_EX,	  nhmex_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_WESTMERE_EX,	  nhmex_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_IVYBRIDGE_X,	  ivbep_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_HASWELL_X,	  hswep_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_X,	  bdx_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_BROADWELL_XEON_D, bdx_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_XEON_PHI_KNL,	  knl_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_DESKTOP,skl_uncore_init),
+	X86_UNCORE_MODEL_MATCH(INTEL_FAM6_SKYLAKE_MOBILE, skl_uncore_init),
	{},
 };
@@ -15,7 +15,11 @@
 #define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
 #define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FIXED + 1)

+#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
+		((dev << 24) | (func << 16) | (type << 8) | idx)
 #define UNCORE_PCI_DEV_DATA(type, idx)	((type << 8) | idx)
+#define UNCORE_PCI_DEV_DEV(data)	((data >> 24) & 0xff)
+#define UNCORE_PCI_DEV_FUNC(data)	((data >> 16) & 0xff)
 #define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
 #define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)
 #define UNCORE_EXTRA_PCI_DEV		0xff

@@ -360,6 +364,7 @@ int bdw_uncore_pci_init(void);
 int skl_uncore_pci_init(void);
 void snb_uncore_cpu_init(void);
 void nhm_uncore_cpu_init(void);
+void skl_uncore_cpu_init(void);
 int snb_pci2phy_map_init(int devid);

 /* perf_event_intel_uncore_snbep.c */
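A quick illustration of how the new packed driver_data round-trips (a hypothetical, standalone check, not kernel code - the macros are copied from the hunk above):

	#include <assert.h>

	#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
			((dev << 24) | (func << 16) | (type << 8) | idx)
	#define UNCORE_PCI_DEV_DEV(data)	((data >> 24) & 0xff)
	#define UNCORE_PCI_DEV_FUNC(data)	((data >> 16) & 0xff)
	#define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
	#define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)

	int main(void)
	{
		unsigned long data = UNCORE_PCI_DEV_FULL_DATA(10, 0, 2, 1);

		assert(UNCORE_PCI_DEV_DEV(data)  == 10);	/* PCI slot */
		assert(UNCORE_PCI_DEV_FUNC(data) == 0);		/* PCI function */
		assert(UNCORE_PCI_DEV_TYPE(data) == 2);		/* uncore PMU type */
		assert(UNCORE_PCI_DEV_IDX(data)  == 1);		/* box index */
		return 0;
	}

This also explains the id->driver_data & ~0xffff test in uncore_pci_probe() earlier in the diff: only entries packed with UNCORE_PCI_DEV_FULL_DATA() set the two high bytes, so the test cheaply distinguishes slot/function-qualified (Knights Landing style) entries from plain UNCORE_PCI_DEV_DATA() ones.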
@@ -1,4 +1,4 @@
-/* Nehalem/SandBridge/Haswell uncore support */
+/* Nehalem/SandBridge/Haswell/Broadwell/Skylake uncore support */
 #include "uncore.h"

 /* Uncore IMC PCI IDs */

@@ -9,6 +9,7 @@
 #define PCI_DEVICE_ID_INTEL_HSW_U_IMC	0x0a04
 #define PCI_DEVICE_ID_INTEL_BDW_IMC	0x1604
 #define PCI_DEVICE_ID_INTEL_SKL_IMC	0x191f
+#define PCI_DEVICE_ID_INTEL_SKL_U_IMC	0x190c

 /* SNB event control */
 #define SNB_UNC_CTL_EV_SEL_MASK		0x000000ff

@@ -64,6 +65,10 @@
 #define NHM_UNC_PERFEVTSEL0	0x3c0
 #define NHM_UNC_UNCORE_PMC0	0x3b0

+/* SKL uncore global control */
+#define SKL_UNC_PERF_GLOBAL_CTL		0xe01
+#define SKL_UNC_GLOBAL_CTL_CORE_ALL	((1 << 5) - 1)
+
 DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15");
 DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18");

@@ -179,6 +184,60 @@ void snb_uncore_cpu_init(void)
		snb_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
 }

+static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
+{
+	if (box->pmu->pmu_idx == 0) {
+		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
+			SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
+	}
+}
+
+static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
+{
+	if (box->pmu->pmu_idx == 0)
+		wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, 0);
+}
+
+static struct intel_uncore_ops skl_uncore_msr_ops = {
+	.init_box	= skl_uncore_msr_init_box,
+	.exit_box	= skl_uncore_msr_exit_box,
+	.disable_event	= snb_uncore_msr_disable_event,
+	.enable_event	= snb_uncore_msr_enable_event,
+	.read_counter	= uncore_msr_read_counter,
+};
+
+static struct intel_uncore_type skl_uncore_cbox = {
+	.name		= "cbox",
+	.num_counters	= 4,
+	.num_boxes	= 5,
+	.perf_ctr_bits	= 44,
+	.fixed_ctr_bits	= 48,
+	.perf_ctr	= SNB_UNC_CBO_0_PER_CTR0,
+	.event_ctl	= SNB_UNC_CBO_0_PERFEVTSEL0,
+	.fixed_ctr	= SNB_UNC_FIXED_CTR,
+	.fixed_ctl	= SNB_UNC_FIXED_CTR_CTRL,
+	.single_fixed	= 1,
+	.event_mask	= SNB_UNC_RAW_EVENT_MASK,
+	.msr_offset	= SNB_UNC_CBO_MSR_OFFSET,
+	.ops		= &skl_uncore_msr_ops,
+	.format_group	= &snb_uncore_format_group,
+	.event_descs	= snb_uncore_events,
+};
+
+static struct intel_uncore_type *skl_msr_uncores[] = {
+	&skl_uncore_cbox,
+	&snb_uncore_arb,
+	NULL,
+};
+
+void skl_uncore_cpu_init(void)
+{
+	uncore_msr_uncores = skl_msr_uncores;
+	if (skl_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
+		skl_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+	snb_uncore_arb.ops = &skl_uncore_msr_ops;
+}
+
 enum {
	SNB_PCI_UNCORE_IMC,
 };

@@ -544,6 +603,11 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
	},
+	{ /* IMC */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
+		.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+	},
+
	{ /* end: all zeroes */ },
 };

@@ -587,6 +651,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
	IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
	IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
	IMC_DEV(SKL_IMC, &skl_uncore_pci_driver),    /* 6th Gen Core */
+	IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver),  /* 6th Gen Core U */
	{  /* end marker */ }
 };
@@ -2164,21 +2164,101 @@ static struct intel_uncore_type *knl_pci_uncores[] = {
 */

 static const struct pci_device_id knl_uncore_pci_ids[] = {
-	{ /* MC UClk */
+	{ /* MC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
-		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_MC_UCLK, 0),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(10, 0, KNL_PCI_UNCORE_MC_UCLK, 0),
+	},
+	{ /* MC1 UClk */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7841),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(11, 0, KNL_PCI_UNCORE_MC_UCLK, 1),
	},
-	{ /* MC DClk Channel */
+	{ /* MC0 DClk CH 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
-		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_MC_DCLK, 0),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 2, KNL_PCI_UNCORE_MC_DCLK, 0),
+	},
+	{ /* MC0 DClk CH 1 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 3, KNL_PCI_UNCORE_MC_DCLK, 1),
+	},
+	{ /* MC0 DClk CH 2 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(8, 4, KNL_PCI_UNCORE_MC_DCLK, 2),
+	},
+	{ /* MC1 DClk CH 0 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 2, KNL_PCI_UNCORE_MC_DCLK, 3),
+	},
+	{ /* MC1 DClk CH 1 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 3, KNL_PCI_UNCORE_MC_DCLK, 4),
+	},
+	{ /* MC1 DClk CH 2 */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7843),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(9, 4, KNL_PCI_UNCORE_MC_DCLK, 5),
	},
-	{ /* EDC UClk */
+	{ /* EDC0 UClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
-		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_EDC_UCLK, 0),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(15, 0, KNL_PCI_UNCORE_EDC_UCLK, 0),
+	},
+	{ /* EDC1 UClk */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(16, 0, KNL_PCI_UNCORE_EDC_UCLK, 1),
+	},
+	{ /* EDC2 UClk */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(17, 0, KNL_PCI_UNCORE_EDC_UCLK, 2),
+	},
+	{ /* EDC3 UClk */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, KNL_PCI_UNCORE_EDC_UCLK, 3),
+	},
+	{ /* EDC4 UClk */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(19, 0, KNL_PCI_UNCORE_EDC_UCLK, 4),
+	},
+	{ /* EDC5 UClk */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(20, 0, KNL_PCI_UNCORE_EDC_UCLK, 5),
+	},
+	{ /* EDC6 UClk */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 0, KNL_PCI_UNCORE_EDC_UCLK, 6),
+	},
+	{ /* EDC7 UClk */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7833),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(22, 0, KNL_PCI_UNCORE_EDC_UCLK, 7),
	},
-	{ /* EDC EClk */
+	{ /* EDC0 EClk */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
-		.driver_data = UNCORE_PCI_DEV_DATA(KNL_PCI_UNCORE_EDC_ECLK, 0),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(24, 2, KNL_PCI_UNCORE_EDC_ECLK, 0),
+	},
+	{ /* EDC1 EClk */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(25, 2, KNL_PCI_UNCORE_EDC_ECLK, 1),
+	},
+	{ /* EDC2 EClk */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(26, 2, KNL_PCI_UNCORE_EDC_ECLK, 2),
+	},
+	{ /* EDC3 EClk */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(27, 2, KNL_PCI_UNCORE_EDC_ECLK, 3),
+	},
+	{ /* EDC4 EClk */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(28, 2, KNL_PCI_UNCORE_EDC_ECLK, 4),
+	},
+	{ /* EDC5 EClk */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(29, 2, KNL_PCI_UNCORE_EDC_ECLK, 5),
+	},
+	{ /* EDC6 EClk */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(30, 2, KNL_PCI_UNCORE_EDC_ECLK, 6),
+	},
+	{ /* EDC7 EClk */
+		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7835),
+		.driver_data = UNCORE_PCI_DEV_FULL_DATA(31, 2, KNL_PCI_UNCORE_EDC_ECLK, 7),
	},
	{ /* M2PCIe */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x7817),
@@ -1,4 +1,5 @@
 #include <linux/perf_event.h>
+#include <asm/intel-family.h>

 enum perf_msr_id {
	PERF_MSR_TSC			= 0,

@@ -34,39 +35,43 @@ static bool test_intel(int idx)
		return false;

	switch (boot_cpu_data.x86_model) {
-	case 30: /* 45nm Nehalem    */
-	case 26: /* 45nm Nehalem-EP */
-	case 46: /* 45nm Nehalem-EX */
+	case INTEL_FAM6_NEHALEM:
+	case INTEL_FAM6_NEHALEM_EP:
+	case INTEL_FAM6_NEHALEM_EX:

-	case 37: /* 32nm Westmere    */
-	case 44: /* 32nm Westmere-EP */
-	case 47: /* 32nm Westmere-EX */
+	case INTEL_FAM6_WESTMERE:
+	case INTEL_FAM6_WESTMERE2:
+	case INTEL_FAM6_WESTMERE_EP:
+	case INTEL_FAM6_WESTMERE_EX:

-	case 42: /* 32nm SandyBridge         */
-	case 45: /* 32nm SandyBridge-E/EN/EP */
+	case INTEL_FAM6_SANDYBRIDGE:
+	case INTEL_FAM6_SANDYBRIDGE_X:

-	case 58: /* 22nm IvyBridge       */
-	case 62: /* 22nm IvyBridge-EP/EX */
+	case INTEL_FAM6_IVYBRIDGE:
+	case INTEL_FAM6_IVYBRIDGE_X:

-	case 60: /* 22nm Haswell Core */
-	case 63: /* 22nm Haswell Server */
-	case 69: /* 22nm Haswell ULT */
-	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
+	case INTEL_FAM6_HASWELL_CORE:
+	case INTEL_FAM6_HASWELL_X:
+	case INTEL_FAM6_HASWELL_ULT:
+	case INTEL_FAM6_HASWELL_GT3E:

-	case 61: /* 14nm Broadwell Core-M */
-	case 86: /* 14nm Broadwell Xeon D */
-	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
-	case 79: /* 14nm Broadwell Server */
+	case INTEL_FAM6_BROADWELL_CORE:
+	case INTEL_FAM6_BROADWELL_XEON_D:
+	case INTEL_FAM6_BROADWELL_GT3E:
+	case INTEL_FAM6_BROADWELL_X:

-	case 55: /* 22nm Atom "Silvermont"                */
-	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
-	case 76: /* 14nm Atom "Airmont"                   */
+	case INTEL_FAM6_ATOM_SILVERMONT1:
+	case INTEL_FAM6_ATOM_SILVERMONT2:
+	case INTEL_FAM6_ATOM_AIRMONT:
		if (idx == PERF_MSR_SMI)
			return true;
		break;

-	case 78: /* 14nm Skylake Mobile */
-	case 94: /* 14nm Skylake Desktop */
+	case INTEL_FAM6_SKYLAKE_MOBILE:
+	case INTEL_FAM6_SKYLAKE_DESKTOP:
+	case INTEL_FAM6_SKYLAKE_X:
+	case INTEL_FAM6_KABYLAKE_MOBILE:
+	case INTEL_FAM6_KABYLAKE_DESKTOP:
		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
			return true;
		break;
@@ -668,6 +668,14 @@ static struct perf_pmu_events_attr event_attr_##v = {			\
	.event_str	= str,						\
 };

+#define EVENT_ATTR_STR_HT(_name, v, noht, ht)				\
+static struct perf_pmu_events_ht_attr event_attr_##v = {		\
+	.attr		= __ATTR(_name, 0444, events_ht_sysfs_show, NULL),\
+	.id		= 0,						\
+	.event_str_noht	= noht,						\
+	.event_str_ht	= ht,						\
+}
+
 extern struct x86_pmu x86_pmu __read_mostly;

 static inline bool x86_pmu_has_lbr_callstack(void)

@@ -803,6 +811,8 @@ struct attribute **merge_attr(struct attribute **a, struct attribute **b);

 ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
			  char *page);
+ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
+			  char *page);

 #ifdef CONFIG_CPU_SUP_AMD

@@ -892,6 +902,8 @@ void intel_ds_init(void);

 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

+u64 lbr_from_signext_quirk_wr(u64 val);
+
 void intel_pmu_lbr_reset(void);

 void intel_pmu_lbr_enable(struct perf_event *event);
@@ -129,6 +129,14 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);

 extern unsigned int __max_logical_packages;
 #define topology_max_packages()			(__max_logical_packages)
+
+extern int __max_smt_threads;
+
+static inline int topology_max_smt_threads(void)
+{
+	return __max_smt_threads;
+}
+
 int topology_update_package_map(unsigned int apicid, unsigned int cpu);
 extern int topology_phys_to_logical_pkg(unsigned int pkg);
 #else

@@ -136,6 +144,7 @@ extern int topology_phys_to_logical_pkg(unsigned int pkg);
 static inline int
 topology_update_package_map(unsigned int apicid, unsigned int cpu) { return 0; }
 static inline int topology_phys_to_logical_pkg(unsigned int pkg) { return 0; }
+static inline int topology_max_smt_threads(void) { return 1; }
 #endif

 static inline void arch_fix_phys_package_id(int num, u32 slot)
@@ -105,6 +105,9 @@ static unsigned int max_physical_pkg_id __read_mostly;
 unsigned int __max_logical_packages __read_mostly;
 EXPORT_SYMBOL(__max_logical_packages);

+/* Maximum number of SMT threads on any online core */
+int __max_smt_threads __read_mostly;
+
 static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
 {
 	unsigned long flags;

@@ -493,7 +496,7 @@ void set_cpu_sibling_map(int cpu)
 	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct cpuinfo_x86 *o;
-	int i;
+	int i, threads;

 	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);

@@ -550,6 +553,10 @@ void set_cpu_sibling_map(int cpu)
 		if (match_die(c, o) && !topology_same_node(c, o))
 			primarily_use_numa_for_topology();
 	}
+
+	threads = cpumask_weight(topology_sibling_cpumask(cpu));
+	if (threads > __max_smt_threads)
+		__max_smt_threads = threads;
 }

 /* maps the cpu to the sched domain representing multi-core */

@@ -1441,6 +1448,21 @@ __init void prefill_possible_map(void)

 #ifdef CONFIG_HOTPLUG_CPU

+/* Recompute SMT state for all CPUs on offline */
+static void recompute_smt_state(void)
+{
+	int max_threads, cpu;
+
+	max_threads = 0;
+	for_each_online_cpu (cpu) {
+		int threads = cpumask_weight(topology_sibling_cpumask(cpu));
+
+		if (threads > max_threads)
+			max_threads = threads;
+	}
+	__max_smt_threads = max_threads;
+}
+
 static void remove_siblinginfo(int cpu)
 {
 	int sibling;

@@ -1465,6 +1487,7 @@ static void remove_siblinginfo(int cpu)
 	c->phys_proc_id = 0;
 	c->cpu_core_id = 0;
 	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
+	recompute_smt_state();
 }

 static void remove_cpu_from_maps(int cpu)
@@ -26,6 +26,7 @@
 #include <linux/seq_file.h>

 #include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
 #include <asm/pmc_core.h>

 #include "intel_pmc_core.h"

@@ -138,10 +139,10 @@ static inline void pmc_core_dbgfs_unregister(struct pmc_dev *pmcdev)
 #endif /* CONFIG_DEBUG_FS */

 static const struct x86_cpu_id intel_pmc_core_ids[] = {
-	{ X86_VENDOR_INTEL, 6, 0x4e, X86_FEATURE_MWAIT,
-		(kernel_ulong_t)NULL}, /* Skylake CPUID Signature */
-	{ X86_VENDOR_INTEL, 6, 0x5e, X86_FEATURE_MWAIT,
-		(kernel_ulong_t)NULL}, /* Skylake CPUID Signature */
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_MOBILE, X86_FEATURE_MWAIT,
+		(kernel_ulong_t)NULL},
+	{ X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_DESKTOP, X86_FEATURE_MWAIT,
+		(kernel_ulong_t)NULL},
 	{}
 };
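The table itself changes only cosmetically, but it is worth noting how such a table is consumed: a driver typically hands it to x86_match_cpu() (declared in the asm/cpu_device_id.h include above) at probe time. A minimal sketch, with the probe function name made up for illustration:

/* Sketch: match the running CPU against the id table at probe time.
 * x86_match_cpu() returns the first matching entry, or NULL. */
static int __init pmc_core_probe_sketch(void)
{
	const struct x86_cpu_id *id = x86_match_cpu(intel_pmc_core_ids);

	if (!id)
		return -ENODEV;	/* not a supported Skylake part */

	/* ... map MMIO, register debugfs entries, etc. ... */
	return 0;
}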
@@ -517,6 +517,11 @@ struct swevent_hlist {
 struct perf_cgroup;
 struct ring_buffer;

+struct pmu_event_list {
+	raw_spinlock_t		lock;
+	struct list_head	list;
+};
+
 /**
  * struct perf_event - performance event kernel representation:
  */

@@ -675,6 +680,7 @@ struct perf_event {
 	int				cgrp_defer_enabled;
 #endif

+	struct list_head		sb_list;
 #endif /* CONFIG_PERF_EVENTS */
 };

@@ -1074,7 +1080,7 @@ extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct
 extern struct perf_callchain_entry *
 get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
 		   u32 max_stack, bool crosstask, bool add_mark);
-extern int get_callchain_buffers(void);
+extern int get_callchain_buffers(int max_stack);
 extern void put_callchain_buffers(void);

 extern int sysctl_perf_event_max_stack;

@@ -1326,6 +1332,13 @@ struct perf_pmu_events_attr {
 	const char *event_str;
 };

+struct perf_pmu_events_ht_attr {
+	struct device_attribute			attr;
+	u64					id;
+	const char				*event_str_ht;
+	const char				*event_str_noht;
+};
+
 ssize_t perf_event_sysfs_show(struct device *dev, struct device_attribute *attr,
 			      char *page);
@@ -276,6 +276,9 @@ enum perf_event_read_format {

 /*
  * Hardware event_id to monitor via a performance monitoring event:
+ *
+ * @sample_max_stack: Max number of frame pointers in a callchain,
+ *		      should be < /proc/sys/kernel/perf_event_max_stack
  */
 struct perf_event_attr {

@@ -385,7 +388,8 @@ struct perf_event_attr {
 	 * Wakeup watermark for AUX area
 	 */
 	__u32	aux_watermark;
-	__u32	__reserved_2;	/* align to __u64 */
+	__u16	sample_max_stack;
+	__u16	__reserved_2;	/* align to __u64 */
 };

 #define perf_flags(attr)	(*(&(attr)->read_format + 1))
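Together with the kernel/events changes below, this lets userspace cap the callchain depth per event. A hedged sketch of the syscall usage (perf_event_open has no glibc wrapper, so it goes through syscall(2)); leaving sample_max_stack at 0 makes the kernel fill in the sysctl default, and asking for more than kernel.perf_event_max_stack gets -EOVERFLOW:

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch: open a sampling event whose callchains are capped at 10 frames. */
static int open_capped_event(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_CALLCHAIN;
	attr.sample_max_stack = 10;	/* new field; 0 means "use the sysctl" */

	/* pid = 0 (this task), cpu = -1 (any), no group fd, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}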
@@ -99,7 +99,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	if (err)
 		goto free_smap;

-	err = get_callchain_buffers();
+	err = get_callchain_buffers(sysctl_perf_event_max_stack);
 	if (err)
 		goto free_smap;
@@ -104,7 +104,7 @@ fail:
 	return -ENOMEM;
 }

-int get_callchain_buffers(void)
+int get_callchain_buffers(int event_max_stack)
 {
 	int err = 0;
 	int count;

@@ -121,6 +121,15 @@ int get_callchain_buffers(void)
 		/* If the allocation failed, give up */
 		if (!callchain_cpus_entries)
 			err = -ENOMEM;
+		/*
+		 * If requesting per event more than the global cap,
+		 * return a different error to help userspace figure
+		 * this out.
+		 *
+		 * And also do it here so that we have &callchain_mutex held.
+		 */
+		if (event_max_stack > sysctl_perf_event_max_stack)
+			err = -EOVERFLOW;
 		goto exit;
 	}
@@ -174,11 +183,12 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs)
 	bool user   = !event->attr.exclude_callchain_user;
 	/* Disallow cross-task user callchains. */
 	bool crosstask = event->ctx->task && event->ctx->task != current;
+	const u32 max_stack = event->attr.sample_max_stack;

 	if (!kernel && !user)
 		return NULL;

-	return get_perf_callchain(regs, 0, kernel, user, sysctl_perf_event_max_stack, crosstask, true);
+	return get_perf_callchain(regs, 0, kernel, user, max_stack, crosstask, true);
 }

 struct perf_callchain_entry *
@@ -335,6 +335,7 @@ static atomic_t perf_sched_count;

 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 static DEFINE_PER_CPU(int, perf_sched_cb_usages);
+static DEFINE_PER_CPU(struct pmu_event_list, pmu_sb_events);

 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;

@@ -396,6 +397,13 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
 	if (ret || !write)
 		return ret;

+	/*
+	 * If throttling is disabled don't allow the write:
+	 */
+	if (sysctl_perf_cpu_time_max_percent == 100 ||
+	    sysctl_perf_cpu_time_max_percent == 0)
+		return -EINVAL;
+
 	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
 	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
 	update_perf_cpu_limits();

@@ -3686,6 +3694,39 @@ static void free_event_rcu(struct rcu_head *head)
 static void ring_buffer_attach(struct perf_event *event,
 			       struct ring_buffer *rb);

+static void detach_sb_event(struct perf_event *event)
+{
+	struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
+
+	raw_spin_lock(&pel->lock);
+	list_del_rcu(&event->sb_list);
+	raw_spin_unlock(&pel->lock);
+}
+
+static bool is_sb_event(struct perf_event *event)
+{
+	struct perf_event_attr *attr = &event->attr;
+
+	if (event->parent)
+		return false;
+
+	if (event->attach_state & PERF_ATTACH_TASK)
+		return false;
+
+	if (attr->mmap || attr->mmap_data || attr->mmap2 ||
+	    attr->comm || attr->comm_exec ||
+	    attr->task ||
+	    attr->context_switch)
+		return true;
+	return false;
+}
+
+static void unaccount_pmu_sb_event(struct perf_event *event)
+{
+	if (is_sb_event(event))
+		detach_sb_event(event);
+}
+
 static void unaccount_event_cpu(struct perf_event *event, int cpu)
 {
 	if (event->parent)

@@ -3749,6 +3790,8 @@ static void unaccount_event(struct perf_event *event)
 	}

 	unaccount_event_cpu(event, event->cpu);
+
+	unaccount_pmu_sb_event(event);
 }

 static void perf_sched_delayed(struct work_struct *work)

@@ -5875,11 +5918,11 @@ perf_event_read_event(struct perf_event *event,
 	perf_output_end(&handle);
 }

-typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
+typedef void (perf_iterate_f)(struct perf_event *event, void *data);

 static void
-perf_event_aux_ctx(struct perf_event_context *ctx,
-		   perf_event_aux_output_cb output,
+perf_iterate_ctx(struct perf_event_context *ctx,
+		 perf_iterate_f output,
 		   void *data, bool all)
 {
 	struct perf_event *event;

@@ -5896,52 +5939,55 @@ perf_event_aux_ctx(struct perf_event_context *ctx,
 	}
 }

-static void
-perf_event_aux_task_ctx(perf_event_aux_output_cb output, void *data,
-			struct perf_event_context *task_ctx)
+static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
 {
-	rcu_read_lock();
-	preempt_disable();
-	perf_event_aux_ctx(task_ctx, output, data, false);
-	preempt_enable();
-	rcu_read_unlock();
+	struct pmu_event_list *pel = this_cpu_ptr(&pmu_sb_events);
+	struct perf_event *event;
+
+	list_for_each_entry_rcu(event, &pel->list, sb_list) {
+		if (event->state < PERF_EVENT_STATE_INACTIVE)
+			continue;
+		if (!event_filter_match(event))
+			continue;
+		output(event, data);
+	}
 }

+/*
+ * Iterate all events that need to receive side-band events.
+ *
+ * For new callers; ensure that account_pmu_sb_event() includes
+ * your event, otherwise it might not get delivered.
+ */
 static void
-perf_event_aux(perf_event_aux_output_cb output, void *data,
+perf_iterate_sb(perf_iterate_f output, void *data,
 	       struct perf_event_context *task_ctx)
 {
-	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
-	struct pmu *pmu;
 	int ctxn;

+	rcu_read_lock();
+	preempt_disable();
+
 	/*
-	 * If we have task_ctx != NULL we only notify
-	 * the task context itself. The task_ctx is set
-	 * only for EXIT events before releasing task
+	 * If we have task_ctx != NULL we only notify the task context itself.
+	 * The task_ctx is set only for EXIT events before releasing task
 	 * context.
 	 */
 	if (task_ctx) {
-		perf_event_aux_task_ctx(output, data, task_ctx);
-		return;
+		perf_iterate_ctx(task_ctx, output, data, false);
+		goto done;
 	}

-	rcu_read_lock();
-	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
-		if (cpuctx->unique_pmu != pmu)
-			goto next;
-		perf_event_aux_ctx(&cpuctx->ctx, output, data, false);
-		ctxn = pmu->task_ctx_nr;
-		if (ctxn < 0)
-			goto next;
+	perf_iterate_sb_cpu(output, data);
+
+	for_each_task_context_nr(ctxn) {
 		ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
 		if (ctx)
-			perf_event_aux_ctx(ctx, output, data, false);
-next:
-		put_cpu_ptr(pmu->pmu_cpu_context);
+			perf_iterate_ctx(ctx, output, data, false);
 	}
+done:
+	preempt_enable();
 	rcu_read_unlock();
 }

@@ -5990,7 +6036,7 @@ void perf_event_exec(void)

 		perf_event_enable_on_exec(ctxn);

-		perf_event_aux_ctx(ctx, perf_event_addr_filters_exec, NULL,
+		perf_iterate_ctx(ctx, perf_event_addr_filters_exec, NULL,
 				   true);
 	}
 	rcu_read_unlock();

@@ -6034,9 +6080,9 @@ static int __perf_pmu_output_stop(void *info)
 	};

 	rcu_read_lock();
-	perf_event_aux_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
+	perf_iterate_ctx(&cpuctx->ctx, __perf_event_output_stop, &ro, false);
 	if (cpuctx->task_ctx)
-		perf_event_aux_ctx(cpuctx->task_ctx, __perf_event_output_stop,
+		perf_iterate_ctx(cpuctx->task_ctx, __perf_event_output_stop,
 				   &ro, false);
 	rcu_read_unlock();

@@ -6165,7 +6211,7 @@ static void perf_event_task(struct task_struct *task,
 		},
 	};

-	perf_event_aux(perf_event_task_output,
+	perf_iterate_sb(perf_event_task_output,
 		       &task_event,
 		       task_ctx);
 }

@@ -6244,7 +6290,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)

 	comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;

-	perf_event_aux(perf_event_comm_output,
+	perf_iterate_sb(perf_event_comm_output,
 		       comm_event,
 		       NULL);
 }

@@ -6475,7 +6521,7 @@ got_name:

 	mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;

-	perf_event_aux(perf_event_mmap_output,
+	perf_iterate_sb(perf_event_mmap_output,
 		       mmap_event,
 		       NULL);

@@ -6558,7 +6604,7 @@ static void perf_addr_filters_adjust(struct vm_area_struct *vma)
 		if (!ctx)
 			continue;

-		perf_event_aux_ctx(ctx, __perf_addr_filters_adjust, vma, true);
+		perf_iterate_ctx(ctx, __perf_addr_filters_adjust, vma, true);
 	}
 	rcu_read_unlock();
 }

@@ -6745,7 +6791,7 @@ static void perf_event_switch(struct task_struct *task,
 		},
 	};

-	perf_event_aux(perf_event_switch_output,
+	perf_iterate_sb(perf_event_switch_output,
 		       &switch_event,
 		       NULL);
 }

@@ -8667,6 +8713,28 @@ unlock:
 	return pmu;
 }

+static void attach_sb_event(struct perf_event *event)
+{
+	struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu);
+
+	raw_spin_lock(&pel->lock);
+	list_add_rcu(&event->sb_list, &pel->list);
+	raw_spin_unlock(&pel->lock);
+}
+
+/*
+ * We keep a list of all !task (and therefore per-cpu) events
+ * that need to receive side-band records.
+ *
+ * This avoids having to scan all the various PMU per-cpu contexts
+ * looking for them.
+ */
+static void account_pmu_sb_event(struct perf_event *event)
+{
+	if (is_sb_event(event))
+		attach_sb_event(event);
+}
+
 static void account_event_cpu(struct perf_event *event, int cpu)
 {
 	if (event->parent)

@@ -8747,6 +8815,8 @@ static void account_event(struct perf_event *event)
 enabled:

 	account_event_cpu(event, event->cpu);
+
+	account_pmu_sb_event(event);
 }

 /*

@@ -8895,7 +8965,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,

 	if (!event->parent) {
 		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
-			err = get_callchain_buffers();
+			err = get_callchain_buffers(attr->sample_max_stack);
 			if (err)
 				goto err_addr_filters;
 		}

@@ -9217,6 +9287,9 @@ SYSCALL_DEFINE5(perf_event_open,
 		return -EINVAL;
 	}

+	if (!attr.sample_max_stack)
+		attr.sample_max_stack = sysctl_perf_event_max_stack;
+
 	/*
 	 * In cgroup mode, the pid argument is used to pass the fd
 	 * opened to the cgroup directory in cgroupfs. The cpu argument

@@ -9290,7 +9363,7 @@ SYSCALL_DEFINE5(perf_event_open,

 	if (is_sampling_event(event)) {
 		if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
-			err = -ENOTSUPP;
+			err = -EOPNOTSUPP;
 			goto err_alloc;
 		}
 	}

@@ -10252,6 +10325,9 @@ static void __init perf_event_init_all_cpus(void)
 		swhash = &per_cpu(swevent_htable, cpu);
 		mutex_init(&swhash->hlist_mutex);
 		INIT_LIST_HEAD(&per_cpu(active_ctx_list, cpu));
+
+		INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
+		raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
 	}
 }
new file: tools/arch/alpha/include/uapi/asm/bitsperlong.h (8 lines)
@@ -0,0 +1,8 @@
+#ifndef __ASM_ALPHA_BITSPERLONG_H
+#define __ASM_ALPHA_BITSPERLONG_H
+
+#define __BITS_PER_LONG 64
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_ALPHA_BITSPERLONG_H */
new file: tools/arch/arm/include/uapi/asm/kvm.h (224 lines)
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+
+#ifndef __ARM_KVM_H__
+#define __ARM_KVM_H__
+
+#include <linux/types.h>
+#include <linux/psci.h>
+#include <asm/ptrace.h>
+
+#define __KVM_HAVE_GUEST_DEBUG
+#define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_READONLY_MEM
+
+#define KVM_REG_SIZE(id)						\
+	(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
+
+/* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */
+#define KVM_ARM_SVC_sp		svc_regs[0]
+#define KVM_ARM_SVC_lr		svc_regs[1]
+#define KVM_ARM_SVC_spsr	svc_regs[2]
+#define KVM_ARM_ABT_sp		abt_regs[0]
+#define KVM_ARM_ABT_lr		abt_regs[1]
+#define KVM_ARM_ABT_spsr	abt_regs[2]
+#define KVM_ARM_UND_sp		und_regs[0]
+#define KVM_ARM_UND_lr		und_regs[1]
+#define KVM_ARM_UND_spsr	und_regs[2]
+#define KVM_ARM_IRQ_sp		irq_regs[0]
+#define KVM_ARM_IRQ_lr		irq_regs[1]
+#define KVM_ARM_IRQ_spsr	irq_regs[2]
+
+/* Valid only for fiq_regs in struct kvm_regs */
+#define KVM_ARM_FIQ_r8		fiq_regs[0]
+#define KVM_ARM_FIQ_r9		fiq_regs[1]
+#define KVM_ARM_FIQ_r10		fiq_regs[2]
+#define KVM_ARM_FIQ_fp		fiq_regs[3]
+#define KVM_ARM_FIQ_ip		fiq_regs[4]
+#define KVM_ARM_FIQ_sp		fiq_regs[5]
+#define KVM_ARM_FIQ_lr		fiq_regs[6]
+#define KVM_ARM_FIQ_spsr	fiq_regs[7]
+
+struct kvm_regs {
+	struct pt_regs usr_regs;	/* R0_usr - R14_usr, PC, CPSR */
+	unsigned long svc_regs[3];	/* SP_svc, LR_svc, SPSR_svc */
+	unsigned long abt_regs[3];	/* SP_abt, LR_abt, SPSR_abt */
+	unsigned long und_regs[3];	/* SP_und, LR_und, SPSR_und */
+	unsigned long irq_regs[3];	/* SP_irq, LR_irq, SPSR_irq */
+	unsigned long fiq_regs[8];	/* R8_fiq - R14_fiq, SPSR_fiq */
+};
+
+/* Supported Processor Types */
+#define KVM_ARM_TARGET_CORTEX_A15	0
+#define KVM_ARM_TARGET_CORTEX_A7	1
+#define KVM_ARM_NUM_TARGETS		2
+
+/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
+#define KVM_ARM_DEVICE_TYPE_SHIFT	0
+#define KVM_ARM_DEVICE_TYPE_MASK	(0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
+#define KVM_ARM_DEVICE_ID_SHIFT		16
+#define KVM_ARM_DEVICE_ID_MASK		(0xffff << KVM_ARM_DEVICE_ID_SHIFT)
+
+/* Supported device IDs */
+#define KVM_ARM_DEVICE_VGIC_V2		0
+
+/* Supported VGIC address types  */
+#define KVM_VGIC_V2_ADDR_TYPE_DIST	0
+#define KVM_VGIC_V2_ADDR_TYPE_CPU	1
+
+#define KVM_VGIC_V2_DIST_SIZE		0x1000
+#define KVM_VGIC_V2_CPU_SIZE		0x2000
+
+#define KVM_ARM_VCPU_POWER_OFF		0 /* CPU is started in OFF state */
+#define KVM_ARM_VCPU_PSCI_0_2		1 /* CPU uses PSCI v0.2 */
+
+struct kvm_vcpu_init {
+	__u32 target;
+	__u32 features[7];
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+};
+
+struct kvm_guest_debug_arch {
+};
+
+struct kvm_debug_exit_arch {
+};
+
+struct kvm_sync_regs {
+};
+
+struct kvm_arch_memory_slot {
+};
+
+/* If you need to interpret the index values, here is the key: */
+#define KVM_REG_ARM_COPROC_MASK		0x000000000FFF0000
+#define KVM_REG_ARM_COPROC_SHIFT	16
+#define KVM_REG_ARM_32_OPC2_MASK	0x0000000000000007
+#define KVM_REG_ARM_32_OPC2_SHIFT	0
+#define KVM_REG_ARM_OPC1_MASK		0x0000000000000078
+#define KVM_REG_ARM_OPC1_SHIFT		3
+#define KVM_REG_ARM_CRM_MASK		0x0000000000000780
+#define KVM_REG_ARM_CRM_SHIFT		7
+#define KVM_REG_ARM_32_CRN_MASK		0x0000000000007800
+#define KVM_REG_ARM_32_CRN_SHIFT	11
+
+#define ARM_CP15_REG_SHIFT_MASK(x,n) \
+	(((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK)
+
+#define __ARM_CP15_REG(op1,crn,crm,op2) \
+	(KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT) | \
+	ARM_CP15_REG_SHIFT_MASK(op1, OPC1) | \
+	ARM_CP15_REG_SHIFT_MASK(crn, 32_CRN) | \
+	ARM_CP15_REG_SHIFT_MASK(crm, CRM) | \
+	ARM_CP15_REG_SHIFT_MASK(op2, 32_OPC2))
+
+#define ARM_CP15_REG32(...) (__ARM_CP15_REG(__VA_ARGS__) | KVM_REG_SIZE_U32)
+
+#define __ARM_CP15_REG64(op1,crm) \
+	(__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
+#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
+
+#define KVM_REG_ARM_TIMER_CTL		ARM_CP15_REG32(0, 14, 3, 1)
+#define KVM_REG_ARM_TIMER_CNT		ARM_CP15_REG64(1, 14)
+#define KVM_REG_ARM_TIMER_CVAL		ARM_CP15_REG64(3, 14)
+
+/* Normal registers are mapped as coprocessor 16. */
+#define KVM_REG_ARM_CORE		(0x0010 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_CORE_REG(name)	(offsetof(struct kvm_regs, name) / 4)
+
+/* Some registers need more space to represent values. */
+#define KVM_REG_ARM_DEMUX		(0x0011 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_DEMUX_ID_MASK	0x000000000000FF00
+#define KVM_REG_ARM_DEMUX_ID_SHIFT	8
+#define KVM_REG_ARM_DEMUX_ID_CCSIDR	(0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
+#define KVM_REG_ARM_DEMUX_VAL_MASK	0x00000000000000FF
+#define KVM_REG_ARM_DEMUX_VAL_SHIFT	0
+
+/* VFP registers: we could overload CP10 like ARM does, but that's ugly. */
+#define KVM_REG_ARM_VFP			(0x0012 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_VFP_MASK		0x000000000000FFFF
+#define KVM_REG_ARM_VFP_BASE_REG	0x0
+#define KVM_REG_ARM_VFP_FPSID		0x1000
+#define KVM_REG_ARM_VFP_FPSCR		0x1001
+#define KVM_REG_ARM_VFP_MVFR1		0x1006
+#define KVM_REG_ARM_VFP_MVFR0		0x1007
+#define KVM_REG_ARM_VFP_FPEXC		0x1008
+#define KVM_REG_ARM_VFP_FPINST		0x1009
+#define KVM_REG_ARM_VFP_FPINST2	0x100A
+
+/* Device Control API: ARM VGIC */
+#define KVM_DEV_ARM_VGIC_GRP_ADDR	0
+#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
+#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS	2
+#define   KVM_DEV_ARM_VGIC_CPUID_SHIFT	32
+#define   KVM_DEV_ARM_VGIC_CPUID_MASK	(0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
+#define   KVM_DEV_ARM_VGIC_OFFSET_SHIFT	0
+#define   KVM_DEV_ARM_VGIC_OFFSET_MASK	(0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS	3
+#define KVM_DEV_ARM_VGIC_GRP_CTRL	4
+#define   KVM_DEV_ARM_VGIC_CTRL_INIT	0
+
+/* KVM_IRQ_LINE irq field index values */
+#define KVM_ARM_IRQ_TYPE_SHIFT		24
+#define KVM_ARM_IRQ_TYPE_MASK		0xff
+#define KVM_ARM_IRQ_VCPU_SHIFT		16
+#define KVM_ARM_IRQ_VCPU_MASK		0xff
+#define KVM_ARM_IRQ_NUM_SHIFT		0
+#define KVM_ARM_IRQ_NUM_MASK		0xffff
+
+/* irq_type field */
+#define KVM_ARM_IRQ_TYPE_CPU		0
+#define KVM_ARM_IRQ_TYPE_SPI		1
+#define KVM_ARM_IRQ_TYPE_PPI		2
+
+/* out-of-kernel GIC cpu interrupt injection irq_number field */
+#define KVM_ARM_IRQ_CPU_IRQ		0
+#define KVM_ARM_IRQ_CPU_FIQ		1
+
+/*
+ * This used to hold the highest supported SPI, but it is now obsolete
+ * and only here to provide source code level compatibility with older
+ * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
+ */
+#ifndef __KERNEL__
+#define KVM_ARM_IRQ_GIC_MAX		127
+#endif
+
+/* One single KVM irqchip, ie. the VGIC */
+#define KVM_NR_IRQCHIPS          1
+
+/* PSCI interface */
+#define KVM_PSCI_FN_BASE		0x95c1ba5e
+#define KVM_PSCI_FN(n)			(KVM_PSCI_FN_BASE + (n))
+
+#define KVM_PSCI_FN_CPU_SUSPEND		KVM_PSCI_FN(0)
+#define KVM_PSCI_FN_CPU_OFF		KVM_PSCI_FN(1)
+#define KVM_PSCI_FN_CPU_ON		KVM_PSCI_FN(2)
+#define KVM_PSCI_FN_MIGRATE		KVM_PSCI_FN(3)
+
+#define KVM_PSCI_RET_SUCCESS		PSCI_RET_SUCCESS
+#define KVM_PSCI_RET_NI			PSCI_RET_NOT_SUPPORTED
+#define KVM_PSCI_RET_INVAL		PSCI_RET_INVALID_PARAMS
+#define KVM_PSCI_RET_DENIED		PSCI_RET_DENIED
+
+#endif /* __ARM_KVM_H__ */
new file: tools/arch/arm/include/uapi/asm/perf_regs.h (23 lines)
@@ -0,0 +1,23 @@
+#ifndef _ASM_ARM_PERF_REGS_H
+#define _ASM_ARM_PERF_REGS_H
+
+enum perf_event_arm_regs {
+	PERF_REG_ARM_R0,
+	PERF_REG_ARM_R1,
+	PERF_REG_ARM_R2,
+	PERF_REG_ARM_R3,
+	PERF_REG_ARM_R4,
+	PERF_REG_ARM_R5,
+	PERF_REG_ARM_R6,
+	PERF_REG_ARM_R7,
+	PERF_REG_ARM_R8,
+	PERF_REG_ARM_R9,
+	PERF_REG_ARM_R10,
+	PERF_REG_ARM_FP,
+	PERF_REG_ARM_IP,
+	PERF_REG_ARM_SP,
+	PERF_REG_ARM_LR,
+	PERF_REG_ARM_PC,
+	PERF_REG_ARM_MAX,
+};
+#endif /* _ASM_ARM_PERF_REGS_H */
new file: tools/arch/arm64/include/uapi/asm/bitsperlong.h (23 lines)
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_BITSPERLONG_H
+#define __ASM_BITSPERLONG_H
+
+#define __BITS_PER_LONG 64
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_BITSPERLONG_H */
new file: tools/arch/arm64/include/uapi/asm/kvm.h (258 lines)
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2012,2013 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * Derived from arch/arm/include/uapi/asm/kvm.h:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KVM_H__
+#define __ARM_KVM_H__
+
+#define KVM_SPSR_EL1	0
+#define KVM_SPSR_SVC	KVM_SPSR_EL1
+#define KVM_SPSR_ABT	1
+#define KVM_SPSR_UND	2
+#define KVM_SPSR_IRQ	3
+#define KVM_SPSR_FIQ	4
+#define KVM_NR_SPSR	5
+
+#ifndef __ASSEMBLY__
+#include <linux/psci.h>
+#include <linux/types.h>
+#include <asm/ptrace.h>
+
+#define __KVM_HAVE_GUEST_DEBUG
+#define __KVM_HAVE_IRQ_LINE
+#define __KVM_HAVE_READONLY_MEM
+
+#define KVM_REG_SIZE(id)						\
+	(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
+
+struct kvm_regs {
+	struct user_pt_regs regs;	/* sp = sp_el0 */
+
+	__u64	sp_el1;
+	__u64	elr_el1;
+
+	__u64	spsr[KVM_NR_SPSR];
+
+	struct user_fpsimd_state fp_regs;
+};
+
+/*
+ * Supported CPU Targets - Adding a new target type is not recommended,
+ * unless there are some special registers not supported by the
+ * genericv8 syreg table.
+ */
+#define KVM_ARM_TARGET_AEM_V8		0
+#define KVM_ARM_TARGET_FOUNDATION_V8	1
+#define KVM_ARM_TARGET_CORTEX_A57	2
+#define KVM_ARM_TARGET_XGENE_POTENZA	3
+#define KVM_ARM_TARGET_CORTEX_A53	4
+/* Generic ARM v8 target */
+#define KVM_ARM_TARGET_GENERIC_V8	5
+
+#define KVM_ARM_NUM_TARGETS		6
+
+/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
+#define KVM_ARM_DEVICE_TYPE_SHIFT	0
+#define KVM_ARM_DEVICE_TYPE_MASK	(0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
+#define KVM_ARM_DEVICE_ID_SHIFT		16
+#define KVM_ARM_DEVICE_ID_MASK		(0xffff << KVM_ARM_DEVICE_ID_SHIFT)
+
+/* Supported device IDs */
+#define KVM_ARM_DEVICE_VGIC_V2		0
+
+/* Supported VGIC address types  */
+#define KVM_VGIC_V2_ADDR_TYPE_DIST	0
+#define KVM_VGIC_V2_ADDR_TYPE_CPU	1
+
+#define KVM_VGIC_V2_DIST_SIZE		0x1000
+#define KVM_VGIC_V2_CPU_SIZE		0x2000
+
+/* Supported VGICv3 address types  */
+#define KVM_VGIC_V3_ADDR_TYPE_DIST	2
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST	3
+
+#define KVM_VGIC_V3_DIST_SIZE		SZ_64K
+#define KVM_VGIC_V3_REDIST_SIZE		(2 * SZ_64K)
+
+#define KVM_ARM_VCPU_POWER_OFF		0 /* CPU is started in OFF state */
+#define KVM_ARM_VCPU_EL1_32BIT		1 /* CPU running a 32bit VM */
+#define KVM_ARM_VCPU_PSCI_0_2		2 /* CPU uses PSCI v0.2 */
+#define KVM_ARM_VCPU_PMU_V3		3 /* Support guest PMUv3 */
+
+struct kvm_vcpu_init {
+	__u32 target;
+	__u32 features[7];
+};
+
+struct kvm_sregs {
+};
+
+struct kvm_fpu {
+};
+
+/*
+ * See v8 ARM ARM D7.3: Debug Registers
+ *
+ * The architectural limit is 16 debug registers of each type although
+ * in practice there are usually less (see ID_AA64DFR0_EL1).
+ *
+ * Although the control registers are architecturally defined as 32
+ * bits wide we use a 64 bit structure here to keep parity with
+ * KVM_GET/SET_ONE_REG behaviour which treats all system registers as
+ * 64 bit values. It also allows for the possibility of the
+ * architecture expanding the control registers without having to
+ * change the userspace ABI.
+ */
+#define KVM_ARM_MAX_DBG_REGS 16
+struct kvm_guest_debug_arch {
+	__u64 dbg_bcr[KVM_ARM_MAX_DBG_REGS];
+	__u64 dbg_bvr[KVM_ARM_MAX_DBG_REGS];
+	__u64 dbg_wcr[KVM_ARM_MAX_DBG_REGS];
+	__u64 dbg_wvr[KVM_ARM_MAX_DBG_REGS];
+};
+
+struct kvm_debug_exit_arch {
+	__u32 hsr;
+	__u64 far;	/* used for watchpoints */
+};
+
+/*
+ * Architecture specific defines for kvm_guest_debug->control
+ */
+
+#define KVM_GUESTDBG_USE_SW_BP		(1 << 16)
+#define KVM_GUESTDBG_USE_HW		(1 << 17)
+
+struct kvm_sync_regs {
+};
+
+struct kvm_arch_memory_slot {
+};
+
+/* If you need to interpret the index values, here is the key: */
+#define KVM_REG_ARM_COPROC_MASK		0x000000000FFF0000
+#define KVM_REG_ARM_COPROC_SHIFT	16
+
+/* Normal registers are mapped as coprocessor 16. */
+#define KVM_REG_ARM_CORE		(0x0010 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_CORE_REG(name)	(offsetof(struct kvm_regs, name) / sizeof(__u32))
+
+/* Some registers need more space to represent values. */
+#define KVM_REG_ARM_DEMUX		(0x0011 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM_DEMUX_ID_MASK	0x000000000000FF00
+#define KVM_REG_ARM_DEMUX_ID_SHIFT	8
+#define KVM_REG_ARM_DEMUX_ID_CCSIDR	(0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
+#define KVM_REG_ARM_DEMUX_VAL_MASK	0x00000000000000FF
+#define KVM_REG_ARM_DEMUX_VAL_SHIFT	0
+
+/* AArch64 system registers */
+#define KVM_REG_ARM64_SYSREG		(0x0013 << KVM_REG_ARM_COPROC_SHIFT)
+#define KVM_REG_ARM64_SYSREG_OP0_MASK	0x000000000000c000
+#define KVM_REG_ARM64_SYSREG_OP0_SHIFT	14
+#define KVM_REG_ARM64_SYSREG_OP1_MASK	0x0000000000003800
+#define KVM_REG_ARM64_SYSREG_OP1_SHIFT	11
+#define KVM_REG_ARM64_SYSREG_CRN_MASK	0x0000000000000780
+#define KVM_REG_ARM64_SYSREG_CRN_SHIFT	7
+#define KVM_REG_ARM64_SYSREG_CRM_MASK	0x0000000000000078
+#define KVM_REG_ARM64_SYSREG_CRM_SHIFT	3
+#define KVM_REG_ARM64_SYSREG_OP2_MASK	0x0000000000000007
+#define KVM_REG_ARM64_SYSREG_OP2_SHIFT	0
+
+#define ARM64_SYS_REG_SHIFT_MASK(x,n) \
+	(((x) << KVM_REG_ARM64_SYSREG_ ## n ## _SHIFT) & \
+	KVM_REG_ARM64_SYSREG_ ## n ## _MASK)
+
+#define __ARM64_SYS_REG(op0,op1,crn,crm,op2) \
+	(KVM_REG_ARM64 | KVM_REG_ARM64_SYSREG | \
+	ARM64_SYS_REG_SHIFT_MASK(op0, OP0) | \
+	ARM64_SYS_REG_SHIFT_MASK(op1, OP1) | \
+	ARM64_SYS_REG_SHIFT_MASK(crn, CRN) | \
+	ARM64_SYS_REG_SHIFT_MASK(crm, CRM) | \
+	ARM64_SYS_REG_SHIFT_MASK(op2, OP2))
+
+#define ARM64_SYS_REG(...) (__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64)
+
+#define KVM_REG_ARM_TIMER_CTL		ARM64_SYS_REG(3, 3, 14, 3, 1)
+#define KVM_REG_ARM_TIMER_CNT		ARM64_SYS_REG(3, 3, 14, 3, 2)
+#define KVM_REG_ARM_TIMER_CVAL		ARM64_SYS_REG(3, 3, 14, 0, 2)
+
+/* Device Control API: ARM VGIC */
+#define KVM_DEV_ARM_VGIC_GRP_ADDR	0
+#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS	1
+#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS	2
+#define   KVM_DEV_ARM_VGIC_CPUID_SHIFT	32
+#define   KVM_DEV_ARM_VGIC_CPUID_MASK	(0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
+#define   KVM_DEV_ARM_VGIC_OFFSET_SHIFT	0
+#define   KVM_DEV_ARM_VGIC_OFFSET_MASK	(0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS	3
+#define KVM_DEV_ARM_VGIC_GRP_CTRL	4
+#define   KVM_DEV_ARM_VGIC_CTRL_INIT	0
+
+/* Device Control API on vcpu fd */
+#define KVM_ARM_VCPU_PMU_V3_CTRL	0
+#define   KVM_ARM_VCPU_PMU_V3_IRQ	0
+#define   KVM_ARM_VCPU_PMU_V3_INIT	1
+
+/* KVM_IRQ_LINE irq field index values */
+#define KVM_ARM_IRQ_TYPE_SHIFT		24
+#define KVM_ARM_IRQ_TYPE_MASK		0xff
+#define KVM_ARM_IRQ_VCPU_SHIFT		16
+#define KVM_ARM_IRQ_VCPU_MASK		0xff
+#define KVM_ARM_IRQ_NUM_SHIFT		0
+#define KVM_ARM_IRQ_NUM_MASK		0xffff
+
+/* irq_type field */
+#define KVM_ARM_IRQ_TYPE_CPU		0
+#define KVM_ARM_IRQ_TYPE_SPI		1
+#define KVM_ARM_IRQ_TYPE_PPI		2
+
+/* out-of-kernel GIC cpu interrupt injection irq_number field */
+#define KVM_ARM_IRQ_CPU_IRQ		0
+#define KVM_ARM_IRQ_CPU_FIQ		1
+
+/*
+ * This used to hold the highest supported SPI, but it is now obsolete
+ * and only here to provide source code level compatibility with older
+ * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
+ */
+#ifndef __KERNEL__
+#define KVM_ARM_IRQ_GIC_MAX		127
+#endif
+
+/* One single KVM irqchip, ie. the VGIC */
+#define KVM_NR_IRQCHIPS          1
+
+/* PSCI interface */
+#define KVM_PSCI_FN_BASE		0x95c1ba5e
+#define KVM_PSCI_FN(n)			(KVM_PSCI_FN_BASE + (n))
+
+#define KVM_PSCI_FN_CPU_SUSPEND		KVM_PSCI_FN(0)
+#define KVM_PSCI_FN_CPU_OFF		KVM_PSCI_FN(1)
+#define KVM_PSCI_FN_CPU_ON		KVM_PSCI_FN(2)
+#define KVM_PSCI_FN_MIGRATE		KVM_PSCI_FN(3)
+
+#define KVM_PSCI_RET_SUCCESS		PSCI_RET_SUCCESS
+#define KVM_PSCI_RET_NI			PSCI_RET_NOT_SUPPORTED
+#define KVM_PSCI_RET_INVAL		PSCI_RET_INVALID_PARAMS
+#define KVM_PSCI_RET_DENIED		PSCI_RET_DENIED
+
+#endif
+
+#endif /* __ARM_KVM_H__ */
new file: tools/arch/arm64/include/uapi/asm/perf_regs.h (40 lines)
@@ -0,0 +1,40 @@
+#ifndef _ASM_ARM64_PERF_REGS_H
+#define _ASM_ARM64_PERF_REGS_H
+
+enum perf_event_arm_regs {
+	PERF_REG_ARM64_X0,
+	PERF_REG_ARM64_X1,
+	PERF_REG_ARM64_X2,
+	PERF_REG_ARM64_X3,
+	PERF_REG_ARM64_X4,
+	PERF_REG_ARM64_X5,
+	PERF_REG_ARM64_X6,
+	PERF_REG_ARM64_X7,
+	PERF_REG_ARM64_X8,
+	PERF_REG_ARM64_X9,
+	PERF_REG_ARM64_X10,
+	PERF_REG_ARM64_X11,
+	PERF_REG_ARM64_X12,
+	PERF_REG_ARM64_X13,
+	PERF_REG_ARM64_X14,
+	PERF_REG_ARM64_X15,
+	PERF_REG_ARM64_X16,
+	PERF_REG_ARM64_X17,
+	PERF_REG_ARM64_X18,
+	PERF_REG_ARM64_X19,
+	PERF_REG_ARM64_X20,
+	PERF_REG_ARM64_X21,
+	PERF_REG_ARM64_X22,
+	PERF_REG_ARM64_X23,
+	PERF_REG_ARM64_X24,
+	PERF_REG_ARM64_X25,
+	PERF_REG_ARM64_X26,
+	PERF_REG_ARM64_X27,
+	PERF_REG_ARM64_X28,
+	PERF_REG_ARM64_X29,
+	PERF_REG_ARM64_LR,
+	PERF_REG_ARM64_SP,
+	PERF_REG_ARM64_PC,
+	PERF_REG_ARM64_MAX,
+};
+#endif /* _ASM_ARM64_PERF_REGS_H */
new file: tools/arch/frv/include/uapi/asm/bitsperlong.h (1 line)
@@ -0,0 +1 @@
+#include <asm-generic/bitsperlong.h>
new file: tools/arch/h8300/include/asm/bitsperlong.h (14 lines)
@@ -0,0 +1,14 @@
+#ifndef __ASM_H8300_BITS_PER_LONG
+#define __ASM_H8300_BITS_PER_LONG
+
+#include <asm-generic/bitsperlong.h>
+
+#if !defined(__ASSEMBLY__)
+/* h8300-unknown-linux required long */
+#define __kernel_size_t __kernel_size_t
+typedef unsigned long	__kernel_size_t;
+typedef long		__kernel_ssize_t;
+typedef long		__kernel_ptrdiff_t;
+#endif
+
+#endif /* __ASM_H8300_BITS_PER_LONG */
new file: tools/arch/hexagon/include/uapi/asm/bitsperlong.h (26 lines)
@@ -0,0 +1,26 @@
+/*
+ * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301, USA.
+ */
+
+#ifndef __ASM_HEXAGON_BITSPERLONG_H
+#define __ASM_HEXAGON_BITSPERLONG_H
+
+#define __BITS_PER_LONG 32
+
+#include <asm-generic/bitsperlong.h>
+
+#endif
new file: tools/arch/ia64/include/uapi/asm/bitsperlong.h (8 lines)
@@ -0,0 +1,8 @@
+#ifndef __ASM_IA64_BITSPERLONG_H
+#define __ASM_IA64_BITSPERLONG_H
+
+#define __BITS_PER_LONG 64
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_IA64_BITSPERLONG_H */
new file: tools/arch/m32r/include/uapi/asm/bitsperlong.h (1 line)
@@ -0,0 +1 @@
+#include <asm-generic/bitsperlong.h>
new file: tools/arch/microblaze/include/uapi/asm/bitsperlong.h (1 line)
@@ -0,0 +1 @@
+#include <asm-generic/bitsperlong.h>
new file: tools/arch/mips/include/uapi/asm/bitsperlong.h (8 lines)
@@ -0,0 +1,8 @@
+#ifndef __ASM_MIPS_BITSPERLONG_H
+#define __ASM_MIPS_BITSPERLONG_H
+
+#define __BITS_PER_LONG _MIPS_SZLONG
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_MIPS_BITSPERLONG_H */
new file: tools/arch/mips/include/uapi/asm/kvm.h (208 lines)
@@ -0,0 +1,208 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Copyright (C) 2013 Cavium, Inc.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#ifndef __LINUX_KVM_MIPS_H
+#define __LINUX_KVM_MIPS_H
+
+#include <linux/types.h>
+
+/*
+ * KVM MIPS specific structures and definitions.
+ *
+ * Some parts derived from the x86 version of this file.
+ */
+
+/*
+ * for KVM_GET_REGS and KVM_SET_REGS
+ *
+ * If Config[AT] is zero (32-bit CPU), the register contents are
+ * stored in the lower 32-bits of the struct kvm_regs fields and sign
+ * extended to 64-bits.
+ */
+struct kvm_regs {
+	/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+	__u64 gpr[32];
+	__u64 hi;
+	__u64 lo;
+	__u64 pc;
+};
+
+/*
+ * for KVM_GET_FPU and KVM_SET_FPU
+ */
+struct kvm_fpu {
+};
+
+
+/*
+ * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
+ * registers.  The id field is broken down as follows:
+ *
+ *  bits[63..52] - As per linux/kvm.h
+ *  bits[51..32] - Must be zero.
+ *  bits[31..16] - Register set.
+ *
+ * Register set = 0: GP registers from kvm_regs (see definitions below).
+ *
+ * Register set = 1: CP0 registers.
+ *  bits[15..8]  - Must be zero.
+ *  bits[7..3]   - Register 'rd'  index.
+ *  bits[2..0]   - Register 'sel' index.
+ *
+ * Register set = 2: KVM specific registers (see definitions below).
+ *
+ * Register set = 3: FPU / MSA registers (see definitions below).
+ *
+ * Other sets registers may be added in the future.  Each set would
+ * have its own identifier in bits[31..16].
+ */
+
+#define KVM_REG_MIPS_GP		(KVM_REG_MIPS | 0x0000000000000000ULL)
+#define KVM_REG_MIPS_CP0	(KVM_REG_MIPS | 0x0000000000010000ULL)
+#define KVM_REG_MIPS_KVM	(KVM_REG_MIPS | 0x0000000000020000ULL)
+#define KVM_REG_MIPS_FPU	(KVM_REG_MIPS | 0x0000000000030000ULL)
+
+
+/*
+ * KVM_REG_MIPS_GP - General purpose registers from kvm_regs.
+ */
+
+#define KVM_REG_MIPS_R0		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 |  0)
+#define KVM_REG_MIPS_R1		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 |  1)
+#define KVM_REG_MIPS_R2		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 |  2)
+#define KVM_REG_MIPS_R3		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 |  3)
+#define KVM_REG_MIPS_R4		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 |  4)
+#define KVM_REG_MIPS_R5		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 |  5)
+#define KVM_REG_MIPS_R6		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 |  6)
+#define KVM_REG_MIPS_R7		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 |  7)
+#define KVM_REG_MIPS_R8		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 |  8)
+#define KVM_REG_MIPS_R9		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 |  9)
+#define KVM_REG_MIPS_R10	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 10)
+#define KVM_REG_MIPS_R11	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 11)
+#define KVM_REG_MIPS_R12	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 12)
+#define KVM_REG_MIPS_R13	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 13)
+#define KVM_REG_MIPS_R14	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 14)
+#define KVM_REG_MIPS_R15	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 15)
+#define KVM_REG_MIPS_R16	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 16)
+#define KVM_REG_MIPS_R17	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 17)
+#define KVM_REG_MIPS_R18	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 18)
+#define KVM_REG_MIPS_R19	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 19)
+#define KVM_REG_MIPS_R20	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 20)
+#define KVM_REG_MIPS_R21	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 21)
+#define KVM_REG_MIPS_R22	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 22)
+#define KVM_REG_MIPS_R23	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 23)
+#define KVM_REG_MIPS_R24	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 24)
+#define KVM_REG_MIPS_R25	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 25)
+#define KVM_REG_MIPS_R26	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 26)
+#define KVM_REG_MIPS_R27	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 27)
+#define KVM_REG_MIPS_R28	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 28)
+#define KVM_REG_MIPS_R29	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 29)
+#define KVM_REG_MIPS_R30	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 30)
+#define KVM_REG_MIPS_R31	(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 31)
+
+#define KVM_REG_MIPS_HI		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 32)
+#define KVM_REG_MIPS_LO		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 33)
+#define KVM_REG_MIPS_PC		(KVM_REG_MIPS_GP | KVM_REG_SIZE_U64 | 34)
+
+
+/*
+ * KVM_REG_MIPS_KVM - KVM specific control registers.
+ */
+
+/*
+ * CP0_Count control
+ * DC:    Set 0: Master disable CP0_Count and set COUNT_RESUME to now
+ *        Set 1: Master re-enable CP0_Count with unchanged bias, handling timer
+ *               interrupts since COUNT_RESUME
+ *        This can be used to freeze the timer to get a consistent snapshot of
+ *        the CP0_Count and timer interrupt pending state, while also resuming
+ *        safely without losing time or guest timer interrupts.
+ * Other: Reserved, do not change.
+ */
+#define KVM_REG_MIPS_COUNT_CTL	    (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_MIPS_COUNT_CTL_DC	0x00000001
+
+/*
+ * CP0_Count resume monotonic nanoseconds
+ * The monotonic nanosecond time of the last set of COUNT_CTL.DC (master
+ * disable). Any reads and writes of Count related registers while
+ * COUNT_CTL.DC=1 will appear to occur at this time. When COUNT_CTL.DC is
+ * cleared again (master enable) any timer interrupts since this time will be
+ * emulated.
+ * Modifications to times in the future are rejected.
+ */
+#define KVM_REG_MIPS_COUNT_RESUME   (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 1)
+/*
+ * CP0_Count rate in Hz
+ * Specifies the rate of the CP0_Count timer in Hz. Modifications occur without
+ * discontinuities in CP0_Count.
+ */
+#define KVM_REG_MIPS_COUNT_HZ	    (KVM_REG_MIPS_KVM | KVM_REG_SIZE_U64 | 2)
+
+
+/*
+ * KVM_REG_MIPS_FPU - Floating Point and MIPS SIMD Architecture (MSA) registers.
+ *
+ *  bits[15..8]  - Register subset (see definitions below).
+ *  bits[7..5]   - Must be zero.
+ *  bits[4..0]   - Register number within register subset.
+ */
+
+#define KVM_REG_MIPS_FPR	(KVM_REG_MIPS_FPU | 0x0000000000000000ULL)
+#define KVM_REG_MIPS_FCR	(KVM_REG_MIPS_FPU | 0x0000000000000100ULL)
+#define KVM_REG_MIPS_MSACR	(KVM_REG_MIPS_FPU | 0x0000000000000200ULL)
+
+/*
+ * KVM_REG_MIPS_FPR - Floating point / Vector registers.
+ */
+#define KVM_REG_MIPS_FPR_32(n)	(KVM_REG_MIPS_FPR | KVM_REG_SIZE_U32  | (n))
+#define KVM_REG_MIPS_FPR_64(n)	(KVM_REG_MIPS_FPR | KVM_REG_SIZE_U64  | (n))
+#define KVM_REG_MIPS_VEC_128(n)	(KVM_REG_MIPS_FPR | KVM_REG_SIZE_U128 | (n))
+
+/*
+ * KVM_REG_MIPS_FCR - Floating point control registers.
+ */
+#define KVM_REG_MIPS_FCR_IR	(KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 |  0)
+#define KVM_REG_MIPS_FCR_CSR	(KVM_REG_MIPS_FCR | KVM_REG_SIZE_U32 | 31)
+
+/*
+ * KVM_REG_MIPS_MSACR - MIPS SIMD Architecture (MSA) control registers.
+ */
+#define KVM_REG_MIPS_MSA_IR	 (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 0)
+#define KVM_REG_MIPS_MSA_CSR	 (KVM_REG_MIPS_MSACR | KVM_REG_SIZE_U32 | 1)
+
+
+/*
+ * KVM MIPS specific structures and definitions
+ *
+ */
+struct kvm_debug_exit_arch {
+	__u64 epc;
+};
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+};
+
+/* definition of registers in kvm_run */
+struct kvm_sync_regs {
+};
+
+/* dummy definition */
+struct kvm_sregs {
+};
+
+struct kvm_mips_interrupt {
+	/* in */
+	__u32 cpu;
+	__u32 irq;
+};
+
+#endif /* __LINUX_KVM_MIPS_H */
new file: tools/arch/mn10300/include/uapi/asm/bitsperlong.h (1 line)
@@ -0,0 +1 @@
+#include <asm-generic/bitsperlong.h>
new file: tools/arch/parisc/include/uapi/asm/bitsperlong.h (14 lines)
@@ -0,0 +1,14 @@
+#ifndef __ASM_PARISC_BITSPERLONG_H
+#define __ASM_PARISC_BITSPERLONG_H
+
+#if defined(__LP64__)
+#define __BITS_PER_LONG 64
+#define SHIFT_PER_LONG 6
+#else
+#define __BITS_PER_LONG 32
+#define SHIFT_PER_LONG 5
+#endif
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_PARISC_BITSPERLONG_H */
new file: tools/arch/powerpc/include/uapi/asm/bitsperlong.h (12 lines)
@@ -0,0 +1,12 @@
+#ifndef __ASM_POWERPC_BITSPERLONG_H
+#define __ASM_POWERPC_BITSPERLONG_H
+
+#if defined(__powerpc64__)
+# define __BITS_PER_LONG 64
+#else
+# define __BITS_PER_LONG 32
+#endif
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_POWERPC_BITSPERLONG_H */
612
tools/arch/powerpc/include/uapi/asm/kvm.h
Normal file
612
tools/arch/powerpc/include/uapi/asm/kvm.h
Normal file
|
@ -0,0 +1,612 @@
|
|||
/*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License, version 2, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __LINUX_KVM_POWERPC_H
#define __LINUX_KVM_POWERPC_H

#include <linux/types.h>

/* Select powerpc specific features in <linux/kvm.h> */
#define __KVM_HAVE_SPAPR_TCE
#define __KVM_HAVE_PPC_SMT
#define __KVM_HAVE_IRQCHIP
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_GUEST_DEBUG

struct kvm_regs {
	__u64 pc;
	__u64 cr;
	__u64 ctr;
	__u64 lr;
	__u64 xer;
	__u64 msr;
	__u64 srr0;
	__u64 srr1;
	__u64 pid;

	__u64 sprg0;
	__u64 sprg1;
	__u64 sprg2;
	__u64 sprg3;
	__u64 sprg4;
	__u64 sprg5;
	__u64 sprg6;
	__u64 sprg7;

	__u64 gpr[32];
};

#define KVM_SREGS_E_IMPL_NONE	0
#define KVM_SREGS_E_IMPL_FSL	1

#define KVM_SREGS_E_FSL_PIDn	(1 << 0) /* PID1/PID2 */

/*
 * Feature bits indicate which sections of the sregs struct are valid,
 * both in KVM_GET_SREGS and KVM_SET_SREGS.  On KVM_SET_SREGS, registers
 * corresponding to unset feature bits will not be modified.  This allows
 * restoring a checkpoint made without that feature, while keeping the
 * default values of the new registers.
 *
 * KVM_SREGS_E_BASE contains:
 * CSRR0/1 (refers to SRR2/3 on 40x)
 * ESR
 * DEAR
 * MCSR
 * TSR
 * TCR
 * DEC
 * TB
 * VRSAVE (USPRG0)
 */
#define KVM_SREGS_E_BASE	(1 << 0)

/*
 * KVM_SREGS_E_ARCH206 contains:
 *
 * PIR
 * MCSRR0/1
 * DECAR
 * IVPR
 */
#define KVM_SREGS_E_ARCH206	(1 << 1)

/*
 * Contains EPCR, plus the upper half of 64-bit registers
 * that are 32-bit on 32-bit implementations.
 */
#define KVM_SREGS_E_64		(1 << 2)

#define KVM_SREGS_E_SPRG8	(1 << 3)
#define KVM_SREGS_E_MCIVPR	(1 << 4)

/*
 * IVORs are used -- contains IVOR0-15, plus additional IVORs
 * in combination with an appropriate feature bit.
 */
#define KVM_SREGS_E_IVOR	(1 << 5)

/*
 * Contains MAS0-4, MAS6-7, TLBnCFG, MMUCFG.
 * Also TLBnPS if MMUCFG[MAVN] = 1.
 */
#define KVM_SREGS_E_ARCH206_MMU	(1 << 6)

/* DBSR, DBCR, IAC, DAC, DVC */
#define KVM_SREGS_E_DEBUG	(1 << 7)

/* Enhanced debug -- DSRR0/1, SPRG9 */
#define KVM_SREGS_E_ED		(1 << 8)

/* Embedded Floating Point (SPE) -- IVOR32-34 if KVM_SREGS_E_IVOR */
#define KVM_SREGS_E_SPE		(1 << 9)

/*
 * DEPRECATED! USE ONE_REG FOR THIS ONE!
 * External Proxy (EXP) -- EPR
 */
#define KVM_SREGS_EXP		(1 << 10)

/* External PID (E.PD) -- EPSC/EPLC */
#define KVM_SREGS_E_PD		(1 << 11)

/* Processor Control (E.PC) -- IVOR36-37 if KVM_SREGS_E_IVOR */
#define KVM_SREGS_E_PC		(1 << 12)

/* Page table (E.PT) -- EPTCFG */
#define KVM_SREGS_E_PT		(1 << 13)

/* Embedded Performance Monitor (E.PM) -- IVOR35 if KVM_SREGS_E_IVOR */
#define KVM_SREGS_E_PM		(1 << 14)

/*
 * Special updates:
 *
 * Some registers may change even while a vcpu is not running.
 * To avoid losing these changes, by default these registers are
 * not updated by KVM_SET_SREGS.  To force an update, set the bit
 * in u.e.update_special corresponding to the register to be updated.
 *
 * The update_special field is zero on return from KVM_GET_SREGS.
 *
 * When restoring a checkpoint, the caller can set update_special
 * to 0xffffffff to ensure that everything is restored, even new features
 * that the caller doesn't know about.
 */
#define KVM_SREGS_E_UPDATE_MCSR	(1 << 0)
#define KVM_SREGS_E_UPDATE_TSR	(1 << 1)
#define KVM_SREGS_E_UPDATE_DEC	(1 << 2)
#define KVM_SREGS_E_UPDATE_DBSR	(1 << 3)
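
The update_special protocol above is easiest to see from the caller's side. A minimal userspace sketch (vcpu_fd is assumed to be an open KVM vcpu descriptor; error handling omitted) of a checkpoint restore that forces even the "special" registers to be written:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Restore a checkpoint taken earlier with KVM_GET_SREGS (sketch). */
static int restore_sregs(int vcpu_fd, const struct kvm_sregs *saved)
{
	struct kvm_sregs sregs;

	memcpy(&sregs, saved, sizeof(sregs));

	/* Per the comment above: 0xffffffff restores everything, even
	 * update bits this caller does not know about. */
	sregs.u.e.update_special = 0xffffffff;

	return ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
}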

/*
 * In KVM_SET_SREGS, reserved/pad fields must be left untouched from a
 * previous KVM_GET_SREGS.
 *
 * Unless otherwise indicated, setting any register with KVM_SET_SREGS
 * directly sets its value.  It does not trigger any special semantics such
 * as write-one-to-clear.  Calling KVM_SET_SREGS on an unmodified struct
 * just received from KVM_GET_SREGS is always a no-op.
 */
struct kvm_sregs {
	__u32 pvr;
	union {
		struct {
			__u64 sdr1;
			struct {
				struct {
					__u64 slbe;
					__u64 slbv;
				} slb[64];
			} ppc64;
			struct {
				__u32 sr[16];
				__u64 ibat[8];
				__u64 dbat[8];
			} ppc32;
		} s;
		struct {
			union {
				struct { /* KVM_SREGS_E_IMPL_FSL */
					__u32 features; /* KVM_SREGS_E_FSL_ */
					__u32 svr;
					__u64 mcar;
					__u32 hid0;

					/* KVM_SREGS_E_FSL_PIDn */
					__u32 pid1, pid2;
				} fsl;
				__u8 pad[256];
			} impl;

			__u32 features; /* KVM_SREGS_E_ */
			__u32 impl_id;	/* KVM_SREGS_E_IMPL_ */
			__u32 update_special; /* KVM_SREGS_E_UPDATE_ */
			__u32 pir;	/* read-only */
			__u64 sprg8;
			__u64 sprg9;	/* E.ED */
			__u64 csrr0;
			__u64 dsrr0;	/* E.ED */
			__u64 mcsrr0;
			__u32 csrr1;
			__u32 dsrr1;	/* E.ED */
			__u32 mcsrr1;
			__u32 esr;
			__u64 dear;
			__u64 ivpr;
			__u64 mcivpr;
			__u64 mcsr;	/* KVM_SREGS_E_UPDATE_MCSR */

			__u32 tsr;	/* KVM_SREGS_E_UPDATE_TSR */
			__u32 tcr;
			__u32 decar;
			__u32 dec;	/* KVM_SREGS_E_UPDATE_DEC */

			/*
			 * Userspace can read TB directly, but the
			 * value reported here is consistent with "dec".
			 *
			 * Read-only.
			 */
			__u64 tb;

			__u32 dbsr;	/* KVM_SREGS_E_UPDATE_DBSR */
			__u32 dbcr[3];
			/*
			 * The iac/dac registers are 64 bits wide, but this
			 * interface exposes only the lower 32 bits on 64-bit
			 * processors.  The ONE_REG interface provides access
			 * to the full 64-bit iac/dac registers.
			 */
			__u32 iac[4];
			__u32 dac[2];
			__u32 dvc[2];
			__u8 num_iac;	/* read-only */
			__u8 num_dac;	/* read-only */
			__u8 num_dvc;	/* read-only */
			__u8 pad;

			__u32 epr;	/* EXP */
			__u32 vrsave;	/* a.k.a. USPRG0 */
			__u32 epcr;	/* KVM_SREGS_E_64 */

			__u32 mas0;
			__u32 mas1;
			__u64 mas2;
			__u64 mas7_3;
			__u32 mas4;
			__u32 mas6;

			__u32 ivor_low[16]; /* IVOR0-15 */
			__u32 ivor_high[18]; /* IVOR32+, plus room to expand */

			__u32 mmucfg;	/* read-only */
			__u32 eptcfg;	/* E.PT, read-only */
			__u32 tlbcfg[4];/* read-only */
			__u32 tlbps[4]; /* read-only */

			__u32 eplc, epsc; /* E.PD */
		} e;
		__u8 pad[1020];
	} u;
};

struct kvm_fpu {
	__u64 fpr[32];
};

/*
 * Defines for h/w breakpoint, watchpoint (read, write or both) and
 * software breakpoint.
 * These are used as "type" in KVM_SET_GUEST_DEBUG ioctl and "status"
 * for KVM_DEBUG_EXIT.
 */
#define KVMPPC_DEBUG_NONE		0x0
#define KVMPPC_DEBUG_BREAKPOINT		(1UL << 1)
#define KVMPPC_DEBUG_WATCH_WRITE	(1UL << 2)
#define KVMPPC_DEBUG_WATCH_READ		(1UL << 3)
struct kvm_debug_exit_arch {
	__u64 address;
	/*
	 * exiting to userspace because of h/w breakpoint, watchpoint
	 * (read, write or both) and software breakpoint.
	 */
	__u32 status;
	__u32 reserved;
};

/* for KVM_SET_GUEST_DEBUG */
struct kvm_guest_debug_arch {
	struct {
		/* H/W breakpoint/watchpoint address */
		__u64 addr;
		/*
		 * Type denotes h/w breakpoint, read watchpoint, write
		 * watchpoint or watchpoint (both read and write).
		 */
		__u32 type;
		__u32 reserved;
	} bp[16];
};

/* Debug related defines */
/*
 * kvm_guest_debug->control is a 32-bit field.  The lower 16 bits are
 * generic and the upper 16 bits are architecture specific.  The
 * architecture-specific bits select whether the ioctl sets a hardware
 * breakpoint or a software breakpoint.
 */
#define KVM_GUESTDBG_USE_SW_BP		0x00010000
#define KVM_GUESTDBG_USE_HW_BP		0x00020000

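For illustration, wiring these flags into KVM_SET_GUEST_DEBUG might look like the sketch below (vcpu_fd is an assumption; KVM_GUESTDBG_ENABLE is the generic enable bit from <linux/kvm.h>; error handling omitted):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Arm one hardware breakpoint at addr (sketch). */
static int set_hw_breakpoint(int vcpu_fd, __u64 addr)
{
	struct kvm_guest_debug dbg = {
		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP,
	};

	dbg.arch.bp[0].addr = addr;
	dbg.arch.bp[0].type = KVMPPC_DEBUG_BREAKPOINT;

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}
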
/* definition of registers in kvm_run */
struct kvm_sync_regs {
};

#define KVM_INTERRUPT_SET	-1U
#define KVM_INTERRUPT_UNSET	-2U
#define KVM_INTERRUPT_SET_LEVEL	-3U

#define KVM_CPU_440		1
#define KVM_CPU_E500V2		2
#define KVM_CPU_3S_32		3
#define KVM_CPU_3S_64		4
#define KVM_CPU_E500MC		5

/* for KVM_CAP_SPAPR_TCE */
struct kvm_create_spapr_tce {
	__u64 liobn;
	__u32 window_size;
};

/* for KVM_CAP_SPAPR_TCE_64 */
struct kvm_create_spapr_tce_64 {
	__u64 liobn;
	__u32 page_shift;
	__u32 flags;
	__u64 offset;	/* in pages */
	__u64 size;	/* in pages */
};

/* for KVM_ALLOCATE_RMA */
struct kvm_allocate_rma {
	__u64 rma_size;
};

/* for KVM_CAP_PPC_RTAS */
struct kvm_rtas_token_args {
	char name[120];
	__u64 token;	/* Use a token of 0 to undefine a mapping */
};

struct kvm_book3e_206_tlb_entry {
	__u32 mas8;
	__u32 mas1;
	__u64 mas2;
	__u64 mas7_3;
};

struct kvm_book3e_206_tlb_params {
	/*
	 * For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV:
	 *
	 * - The number of ways of TLB0 must be a power of two between 2 and
	 *   16.
	 * - TLB1 must be fully associative.
	 * - The size of TLB0 must be a multiple of the number of ways, and
	 *   the number of sets must be a power of two.
	 * - The size of TLB1 may not exceed 64 entries.
	 * - TLB0 supports 4 KiB pages.
	 * - The page sizes supported by TLB1 are as indicated by
	 *   TLB1CFG (if MMUCFG[MAVN] = 0) or TLB1PS (if MMUCFG[MAVN] = 1)
	 *   as returned by KVM_GET_SREGS.
	 * - TLB2 and TLB3 are reserved, and their entries in tlb_sizes[]
	 *   and tlb_ways[] must be zero.
	 *
	 * tlb_ways[n] = tlb_sizes[n] means the array is fully associative.
	 *
	 * KVM will adjust TLBnCFG based on the sizes configured here,
	 * though arrays greater than 2048 entries will have TLBnCFG[NENTRY]
	 * set to zero.
	 */
	__u32 tlb_sizes[4];
	__u32 tlb_ways[4];
	__u32 reserved[8];
};

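For illustration, a parameter block that satisfies all of the constraints above (the sizes are hypothetical: a 512-entry, 4-way TLB0 and a fully associative 64-entry TLB1):

struct kvm_book3e_206_tlb_params params = {
	.tlb_sizes = { 512, 64, 0, 0 },	/* TLB2/TLB3 reserved, must be 0 */
	.tlb_ways  = {   4, 64, 0, 0 },	/* tlb_ways[1] == tlb_sizes[1]: fully associative */
};
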
/* For KVM_PPC_GET_HTAB_FD */
struct kvm_get_htab_fd {
	__u64	flags;
	__u64	start_index;
	__u64	reserved[2];
};

/* Values for kvm_get_htab_fd.flags */
#define KVM_GET_HTAB_BOLTED_ONLY	((__u64)0x1)
#define KVM_GET_HTAB_WRITE		((__u64)0x2)

/*
 * Data read on the file descriptor is formatted as a series of
 * records, each consisting of a header followed by a series of
 * `n_valid' HPTEs (16 bytes each), which are all valid.  Following
 * those valid HPTEs there are `n_invalid' invalid HPTEs, which
 * are not represented explicitly in the stream.  The same format
 * is used for writing.
 */
struct kvm_get_htab_header {
	__u32	index;
	__u16	n_valid;
	__u16	n_invalid;
};

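A sketch of a reader for this record format (htab_fd is assumed to come from the KVM_PPC_GET_HTAB_FD ioctl; error handling trimmed; the 16-byte HPTE size is taken from the comment above):

#include <unistd.h>
#include <linux/kvm.h>

#define HPTE_SIZE 16	/* each valid HPTE is 16 bytes, per the format above */

static void walk_htab(int htab_fd)
{
	char buf[4096];
	ssize_t n;

	while ((n = read(htab_fd, buf, sizeof(buf))) > 0) {
		char *p = buf;

		while (p < buf + n) {
			struct kvm_get_htab_header *hdr = (void *)p;

			/* hdr->n_valid HPTEs follow the header in the stream... */
			p += sizeof(*hdr) + hdr->n_valid * HPTE_SIZE;
			/* ...and hdr->n_invalid HPTEs are implied, not stored. */
		}
	}
}
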
/* Per-vcpu XICS interrupt controller state */
#define KVM_REG_PPC_ICP_STATE	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8c)

#define  KVM_REG_PPC_ICP_CPPR_SHIFT	56	/* current proc priority */
#define  KVM_REG_PPC_ICP_CPPR_MASK	0xff
#define  KVM_REG_PPC_ICP_XISR_SHIFT	32	/* interrupt status field */
#define  KVM_REG_PPC_ICP_XISR_MASK	0xffffff
#define  KVM_REG_PPC_ICP_MFRR_SHIFT	24	/* pending IPI priority */
#define  KVM_REG_PPC_ICP_MFRR_MASK	0xff
#define  KVM_REG_PPC_ICP_PPRI_SHIFT	16	/* pending irq priority */
#define  KVM_REG_PPC_ICP_PPRI_MASK	0xff

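Unpacking the 64-bit ICP state with these shifts and masks is mechanical; a sketch:

#include <stdio.h>

static void print_icp_state(unsigned long long state)
{
	/* Each field is shifted into place and masked out again here. */
	printf("cppr=%llu xisr=%llu mfrr=%llu ppri=%llu\n",
	       (state >> KVM_REG_PPC_ICP_CPPR_SHIFT) & KVM_REG_PPC_ICP_CPPR_MASK,
	       (state >> KVM_REG_PPC_ICP_XISR_SHIFT) & KVM_REG_PPC_ICP_XISR_MASK,
	       (state >> KVM_REG_PPC_ICP_MFRR_SHIFT) & KVM_REG_PPC_ICP_MFRR_MASK,
	       (state >> KVM_REG_PPC_ICP_PPRI_SHIFT) & KVM_REG_PPC_ICP_PPRI_MASK);
}
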
/* Device control API: PPC-specific devices */
#define KVM_DEV_MPIC_GRP_MISC		1
#define   KVM_DEV_MPIC_BASE_ADDR	0	/* 64-bit */

#define KVM_DEV_MPIC_GRP_REGISTER	2	/* 32-bit */
#define KVM_DEV_MPIC_GRP_IRQ_ACTIVE	3	/* 32-bit */

/* One-Reg API: PPC-specific registers */
#define KVM_REG_PPC_HIOR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x1)
#define KVM_REG_PPC_IAC1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x2)
#define KVM_REG_PPC_IAC2	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x3)
#define KVM_REG_PPC_IAC3	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x4)
#define KVM_REG_PPC_IAC4	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x5)
#define KVM_REG_PPC_DAC1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x6)
#define KVM_REG_PPC_DAC2	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x7)
#define KVM_REG_PPC_DABR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8)
#define KVM_REG_PPC_DSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9)
#define KVM_REG_PPC_PURR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa)
#define KVM_REG_PPC_SPURR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb)
#define KVM_REG_PPC_DAR		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xc)
#define KVM_REG_PPC_DSISR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xd)
#define KVM_REG_PPC_AMR		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xe)
#define KVM_REG_PPC_UAMOR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xf)

#define KVM_REG_PPC_MMCR0	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10)
#define KVM_REG_PPC_MMCR1	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11)
#define KVM_REG_PPC_MMCRA	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12)
#define KVM_REG_PPC_MMCR2	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x13)
#define KVM_REG_PPC_MMCRS	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x14)
#define KVM_REG_PPC_SIAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x15)
#define KVM_REG_PPC_SDAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x16)
#define KVM_REG_PPC_SIER	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x17)

#define KVM_REG_PPC_PMC1	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18)
#define KVM_REG_PPC_PMC2	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19)
#define KVM_REG_PPC_PMC3	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1a)
#define KVM_REG_PPC_PMC4	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1b)
#define KVM_REG_PPC_PMC5	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1c)
#define KVM_REG_PPC_PMC6	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1d)
#define KVM_REG_PPC_PMC7	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1e)
#define KVM_REG_PPC_PMC8	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x1f)

/* 32 floating-point registers */
#define KVM_REG_PPC_FPR0	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x20)
#define KVM_REG_PPC_FPR(n)	(KVM_REG_PPC_FPR0 + (n))
#define KVM_REG_PPC_FPR31	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x3f)

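These IDs feed the generic ONE_REG ioctls; reading one floating-point register could look like this sketch (vcpu_fd is an assumption; error handling omitted):

#include <sys/ioctl.h>
#include <linux/kvm.h>

static __u64 read_fpr(int vcpu_fd, int n)
{
	__u64 val = 0;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_PPC_FPR(n),		/* register ID from the table above */
		.addr = (__u64)(unsigned long)&val,	/* userspace buffer for the value */
	};

	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	return val;
}
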
/* 32 VMX/Altivec vector registers */
#define KVM_REG_PPC_VR0		(KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x40)
#define KVM_REG_PPC_VR(n)	(KVM_REG_PPC_VR0 + (n))
#define KVM_REG_PPC_VR31	(KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x5f)

/* 32 double-width FP registers for VSX */
/* High-order halves overlap with FP regs */
#define KVM_REG_PPC_VSR0	(KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x60)
#define KVM_REG_PPC_VSR(n)	(KVM_REG_PPC_VSR0 + (n))
#define KVM_REG_PPC_VSR31	(KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x7f)

/* FP and vector status/control registers */
#define KVM_REG_PPC_FPSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x80)
/*
 * The VSCR register is documented as a 32-bit register in the ISA, but it
 * can only be accessed via a vector register.  Expose VSCR as a 32-bit
 * register even though the kernel represents it as a 128-bit vector.
 */
#define KVM_REG_PPC_VSCR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x81)

/* Virtual processor areas */
/* For SLB & DTL, address in high (first) half, length in low half */
#define KVM_REG_PPC_VPA_ADDR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x82)
#define KVM_REG_PPC_VPA_SLB	(KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x83)
#define KVM_REG_PPC_VPA_DTL	(KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x84)

#define KVM_REG_PPC_EPCR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x85)
#define KVM_REG_PPC_EPR		(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x86)

/* Timer Status Register OR/CLEAR interface */
#define KVM_REG_PPC_OR_TSR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x87)
#define KVM_REG_PPC_CLEAR_TSR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x88)
#define KVM_REG_PPC_TCR		(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x89)
#define KVM_REG_PPC_TSR		(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x8a)

/* Debugging: Special instruction for software breakpoint */
#define KVM_REG_PPC_DEBUG_INST	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x8b)

/* MMU registers */
#define KVM_REG_PPC_MAS0	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x8c)
#define KVM_REG_PPC_MAS1	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x8d)
#define KVM_REG_PPC_MAS2	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8e)
#define KVM_REG_PPC_MAS7_3	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x8f)
#define KVM_REG_PPC_MAS4	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x90)
#define KVM_REG_PPC_MAS6	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x91)
#define KVM_REG_PPC_MMUCFG	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x92)
/*
 * TLBnCFG fields TLBnCFG_N_ENTRY and TLBnCFG_ASSOC can be changed only
 * using the KVM_CAP_SW_TLB ioctl.
 */
#define KVM_REG_PPC_TLB0CFG	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x93)
#define KVM_REG_PPC_TLB1CFG	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x94)
#define KVM_REG_PPC_TLB2CFG	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x95)
#define KVM_REG_PPC_TLB3CFG	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x96)
#define KVM_REG_PPC_TLB0PS	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x97)
#define KVM_REG_PPC_TLB1PS	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x98)
#define KVM_REG_PPC_TLB2PS	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x99)
#define KVM_REG_PPC_TLB3PS	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9a)
#define KVM_REG_PPC_EPTCFG	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9b)

/* Timebase offset */
#define KVM_REG_PPC_TB_OFFSET	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9c)

/* POWER8 registers */
#define KVM_REG_PPC_SPMC1	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9d)
#define KVM_REG_PPC_SPMC2	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9e)
#define KVM_REG_PPC_IAMR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9f)
#define KVM_REG_PPC_TFHAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa0)
#define KVM_REG_PPC_TFIAR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa1)
#define KVM_REG_PPC_TEXASR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa2)
#define KVM_REG_PPC_FSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa3)
#define KVM_REG_PPC_PSPB	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xa4)
#define KVM_REG_PPC_EBBHR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa5)
#define KVM_REG_PPC_EBBRR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa6)
#define KVM_REG_PPC_BESCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa7)
#define KVM_REG_PPC_TAR		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa8)
#define KVM_REG_PPC_DPDES	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa9)
#define KVM_REG_PPC_DAWR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaa)
#define KVM_REG_PPC_DAWRX	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xab)
#define KVM_REG_PPC_CIABR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xac)
#define KVM_REG_PPC_IC		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xad)
#define KVM_REG_PPC_VTB		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xae)
#define KVM_REG_PPC_CSIGR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaf)
#define KVM_REG_PPC_TACR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb0)
#define KVM_REG_PPC_TCSCR	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1)
#define KVM_REG_PPC_PID		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2)
#define KVM_REG_PPC_ACOP	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3)

#define KVM_REG_PPC_VRSAVE	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4)
#define KVM_REG_PPC_LPCR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5)
#define KVM_REG_PPC_LPCR_64	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb5)
#define KVM_REG_PPC_PPR		(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6)

/* Architecture compatibility level */
#define KVM_REG_PPC_ARCH_COMPAT	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7)

#define KVM_REG_PPC_DABRX	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb8)
#define KVM_REG_PPC_WORT	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb9)
#define KVM_REG_PPC_SPRG9	(KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xba)
#define KVM_REG_PPC_DBSR	(KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbb)

/* Transactional Memory checkpointed state:
 * This is all GPRs, all VSX regs and a subset of SPRs
 */
#define KVM_REG_PPC_TM		(KVM_REG_PPC | 0x80000000)
/* TM GPRs */
#define KVM_REG_PPC_TM_GPR0	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0)
#define KVM_REG_PPC_TM_GPR(n)	(KVM_REG_PPC_TM_GPR0 + (n))
#define KVM_REG_PPC_TM_GPR31	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x1f)
/* TM VSX */
#define KVM_REG_PPC_TM_VSR0	(KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x20)
#define KVM_REG_PPC_TM_VSR(n)	(KVM_REG_PPC_TM_VSR0 + (n))
#define KVM_REG_PPC_TM_VSR63	(KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x5f)
/* TM SPRS */
#define KVM_REG_PPC_TM_CR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x60)
#define KVM_REG_PPC_TM_LR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x61)
#define KVM_REG_PPC_TM_CTR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x62)
#define KVM_REG_PPC_TM_FPSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x63)
#define KVM_REG_PPC_TM_AMR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x64)
#define KVM_REG_PPC_TM_PPR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x65)
#define KVM_REG_PPC_TM_VRSAVE	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x66)
#define KVM_REG_PPC_TM_VSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67)
#define KVM_REG_PPC_TM_DSCR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68)
#define KVM_REG_PPC_TM_TAR	(KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69)

/* PPC64 eXternal Interrupt Controller Specification */
#define KVM_DEV_XICS_GRP_SOURCES	1	/* 64-bit source attributes */

/* Layout of 64-bit source attribute values */
#define  KVM_XICS_DESTINATION_SHIFT	0
#define  KVM_XICS_DESTINATION_MASK	0xffffffffULL
#define  KVM_XICS_PRIORITY_SHIFT	32
#define  KVM_XICS_PRIORITY_MASK		0xff
#define  KVM_XICS_LEVEL_SENSITIVE	(1ULL << 40)
#define  KVM_XICS_MASKED		(1ULL << 41)
#define  KVM_XICS_PENDING		(1ULL << 42)

#endif /* __LINUX_KVM_POWERPC_H */
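
Putting the source-attribute layout together, a sketch that builds the 64-bit value for a masked, level-sensitive source (server and priority are caller-supplied):

static unsigned long long xics_source_attr(unsigned int server, unsigned int priority)
{
	unsigned long long attr = 0;

	/* Destination (server) in bits 0-31, priority in bits 32-39. */
	attr |= ((unsigned long long)server & KVM_XICS_DESTINATION_MASK)
			<< KVM_XICS_DESTINATION_SHIFT;
	attr |= ((unsigned long long)priority & KVM_XICS_PRIORITY_MASK)
			<< KVM_XICS_PRIORITY_SHIFT;
	attr |= KVM_XICS_LEVEL_SENSITIVE | KVM_XICS_MASKED;

	return attr;
}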

new file: tools/arch/powerpc/include/uapi/asm/perf_regs.h (50 lines)
@@ -0,0 +1,50 @@
#ifndef _UAPI_ASM_POWERPC_PERF_REGS_H
#define _UAPI_ASM_POWERPC_PERF_REGS_H

enum perf_event_powerpc_regs {
	PERF_REG_POWERPC_R0,
	PERF_REG_POWERPC_R1,
	PERF_REG_POWERPC_R2,
	PERF_REG_POWERPC_R3,
	PERF_REG_POWERPC_R4,
	PERF_REG_POWERPC_R5,
	PERF_REG_POWERPC_R6,
	PERF_REG_POWERPC_R7,
	PERF_REG_POWERPC_R8,
	PERF_REG_POWERPC_R9,
	PERF_REG_POWERPC_R10,
	PERF_REG_POWERPC_R11,
	PERF_REG_POWERPC_R12,
	PERF_REG_POWERPC_R13,
	PERF_REG_POWERPC_R14,
	PERF_REG_POWERPC_R15,
	PERF_REG_POWERPC_R16,
	PERF_REG_POWERPC_R17,
	PERF_REG_POWERPC_R18,
	PERF_REG_POWERPC_R19,
	PERF_REG_POWERPC_R20,
	PERF_REG_POWERPC_R21,
	PERF_REG_POWERPC_R22,
	PERF_REG_POWERPC_R23,
	PERF_REG_POWERPC_R24,
	PERF_REG_POWERPC_R25,
	PERF_REG_POWERPC_R26,
	PERF_REG_POWERPC_R27,
	PERF_REG_POWERPC_R28,
	PERF_REG_POWERPC_R29,
	PERF_REG_POWERPC_R30,
	PERF_REG_POWERPC_R31,
	PERF_REG_POWERPC_NIP,
	PERF_REG_POWERPC_MSR,
	PERF_REG_POWERPC_ORIG_R3,
	PERF_REG_POWERPC_CTR,
	PERF_REG_POWERPC_LINK,
	PERF_REG_POWERPC_XER,
	PERF_REG_POWERPC_CCR,
	PERF_REG_POWERPC_SOFTE,
	PERF_REG_POWERPC_TRAP,
	PERF_REG_POWERPC_DAR,
	PERF_REG_POWERPC_DSISR,
	PERF_REG_POWERPC_MAX,
};
#endif /* _UAPI_ASM_POWERPC_PERF_REGS_H */
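
These enum values are bit positions in perf_event_attr.sample_regs_user; a sketch that asks for NIP and R1 (the stack pointer) along with PERF_SAMPLE_REGS_USER:

#include <linux/perf_event.h>

static void request_regs(struct perf_event_attr *attr)
{
	/* Ask for user-level register samples... */
	attr->sample_type |= PERF_SAMPLE_REGS_USER;
	/* ...and select which registers via a bitmask of the enum above. */
	attr->sample_regs_user = (1ULL << PERF_REG_POWERPC_NIP) |
				 (1ULL << PERF_REG_POWERPC_R1);
}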

new file: tools/arch/s390/include/uapi/asm/bitsperlong.h (12 lines)
@@ -0,0 +1,12 @@
#ifndef __ASM_S390_BITSPERLONG_H
#define __ASM_S390_BITSPERLONG_H

#ifndef __s390x__
#define __BITS_PER_LONG 32
#else
#define __BITS_PER_LONG 64
#endif

#include <asm-generic/bitsperlong.h>

#endif /* __ASM_S390_BITSPERLONG_H */

new file: tools/arch/s390/include/uapi/asm/kvm.h (192 lines)
@@ -0,0 +1,192 @@
#ifndef __LINUX_KVM_S390_H
#define __LINUX_KVM_S390_H
/*
 * KVM s390 specific structures and definitions
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 */
#include <linux/types.h>

#define __KVM_S390
#define __KVM_HAVE_GUEST_DEBUG

/* Device control API: s390-specific devices */
#define KVM_DEV_FLIC_GET_ALL_IRQS	1
#define KVM_DEV_FLIC_ENQUEUE		2
#define KVM_DEV_FLIC_CLEAR_IRQS		3
#define KVM_DEV_FLIC_APF_ENABLE		4
#define KVM_DEV_FLIC_APF_DISABLE_WAIT	5
#define KVM_DEV_FLIC_ADAPTER_REGISTER	6
#define KVM_DEV_FLIC_ADAPTER_MODIFY	7
#define KVM_DEV_FLIC_CLEAR_IO_IRQ	8
/*
 * We can have up to 4*64k pending subchannels + 8 adapter interrupts,
 * as well as up to ASYNC_PF_PER_VCPU*KVM_MAX_VCPUS pfault done interrupts.
 * There are also sclp and machine checks.  This gives us
 * sizeof(kvm_s390_irq)*(4*65536+8+64*64+1+1) = 72 * 266250 = 19170000.
 * Let's round up to 8192 pages.
 */
#define KVM_S390_MAX_FLOAT_IRQS	266250
#define KVM_S390_FLIC_MAX_BUFFER	0x2000000

struct kvm_s390_io_adapter {
	__u32 id;
	__u8 isc;
	__u8 maskable;
	__u8 swap;
	__u8 pad;
};

#define KVM_S390_IO_ADAPTER_MASK	1
#define KVM_S390_IO_ADAPTER_MAP		2
#define KVM_S390_IO_ADAPTER_UNMAP	3

struct kvm_s390_io_adapter_req {
	__u32 id;
	__u8 type;
	__u8 mask;
	__u16 pad0;
	__u64 addr;
};

/* kvm attr_group on vm fd */
#define KVM_S390_VM_MEM_CTRL		0
#define KVM_S390_VM_TOD			1
#define KVM_S390_VM_CRYPTO		2
#define KVM_S390_VM_CPU_MODEL		3

/* kvm attributes for mem_ctrl */
#define KVM_S390_VM_MEM_ENABLE_CMMA	0
#define KVM_S390_VM_MEM_CLR_CMMA	1
#define KVM_S390_VM_MEM_LIMIT_SIZE	2

#define KVM_S390_NO_MEM_LIMIT		U64_MAX

/* kvm attributes for KVM_S390_VM_TOD */
#define KVM_S390_VM_TOD_LOW		0
#define KVM_S390_VM_TOD_HIGH		1

/* kvm attributes for KVM_S390_VM_CPU_MODEL */
/* processor related attributes are r/w */
#define KVM_S390_VM_CPU_PROCESSOR	0
struct kvm_s390_vm_cpu_processor {
	__u64 cpuid;
	__u16 ibc;
	__u8  pad[6];
	__u64 fac_list[256];
};

/* machine related attributes are r/o */
#define KVM_S390_VM_CPU_MACHINE		1
struct kvm_s390_vm_cpu_machine {
	__u64 cpuid;
	__u32 ibc;
	__u8  pad[4];
	__u64 fac_mask[256];
	__u64 fac_list[256];
};

/* kvm attributes for crypto */
#define KVM_S390_VM_CRYPTO_ENABLE_AES_KW	0
#define KVM_S390_VM_CRYPTO_ENABLE_DEA_KW	1
#define KVM_S390_VM_CRYPTO_DISABLE_AES_KW	2
#define KVM_S390_VM_CRYPTO_DISABLE_DEA_KW	3

/* for KVM_GET_REGS and KVM_SET_REGS */
struct kvm_regs {
	/* general purpose regs for s390 */
	__u64 gprs[16];
};

/* for KVM_GET_SREGS and KVM_SET_SREGS */
struct kvm_sregs {
	__u32 acrs[16];
	__u64 crs[16];
};

/* for KVM_GET_FPU and KVM_SET_FPU */
struct kvm_fpu {
	__u32 fpc;
	__u64 fprs[16];
};

#define KVM_GUESTDBG_USE_HW_BP		0x00010000

#define KVM_HW_BP			1
#define KVM_HW_WP_WRITE			2
#define KVM_SINGLESTEP			4

struct kvm_debug_exit_arch {
	__u64 addr;
	__u8 type;
	__u8 pad[7]; /* Should be set to 0 */
};

struct kvm_hw_breakpoint {
	__u64 addr;
	__u64 phys_addr;
	__u64 len;
	__u8 type;
	__u8 pad[7]; /* Should be set to 0 */
};

/* for KVM_SET_GUEST_DEBUG */
struct kvm_guest_debug_arch {
	__u32 nr_hw_bp;
	__u32 pad; /* Should be set to 0 */
	struct kvm_hw_breakpoint __user *hw_bp;
};

/* for KVM_SYNC_PFAULT and KVM_REG_S390_PFTOKEN */
#define KVM_S390_PFAULT_TOKEN_INVALID	0xffffffffffffffffULL

#define KVM_SYNC_PREFIX	(1UL << 0)
#define KVM_SYNC_GPRS	(1UL << 1)
#define KVM_SYNC_ACRS	(1UL << 2)
#define KVM_SYNC_CRS	(1UL << 3)
#define KVM_SYNC_ARCH0	(1UL << 4)
#define KVM_SYNC_PFAULT	(1UL << 5)
#define KVM_SYNC_VRS	(1UL << 6)
#define KVM_SYNC_RICCB	(1UL << 7)
#define KVM_SYNC_FPRS	(1UL << 8)
/* definition of registers in kvm_run */
struct kvm_sync_regs {
	__u64 prefix;	/* prefix register */
	__u64 gprs[16];	/* general purpose registers */
	__u32 acrs[16];	/* access registers */
	__u64 crs[16];	/* control registers */
	__u64 todpr;	/* tod programmable register [ARCH0] */
	__u64 cputm;	/* cpu timer [ARCH0] */
	__u64 ckc;	/* clock comparator [ARCH0] */
	__u64 pp;	/* program parameter [ARCH0] */
	__u64 gbea;	/* guest breaking-event address [ARCH0] */
	__u64 pft;	/* pfault token [PFAULT] */
	__u64 pfs;	/* pfault select [PFAULT] */
	__u64 pfc;	/* pfault compare [PFAULT] */
	union {
		__u64 vrs[32][2];	/* vector registers (KVM_SYNC_VRS) */
		__u64 fprs[16];		/* fp registers (KVM_SYNC_FPRS) */
	};
	__u8  reserved[512];	/* for future vector expansion */
	__u32 fpc;	/* valid on KVM_SYNC_VRS or KVM_SYNC_FPRS */
	__u8 padding[52];	/* riccb needs to be 64byte aligned */
	__u8 riccb[64];	/* runtime instrumentation controls block */
};
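
The KVM_SYNC_* bits govern this register sharing area in kvm_run; a sketch that pushes new GPR values through it (run is assumed to be the vcpu's mmap'ed kvm_run):

#include <linux/kvm.h>

static void sync_gprs(struct kvm_run *run, const __u64 *gprs)
{
	int i;

	/* Ask KVM to keep run->s.regs GPRs up to date on exits... */
	run->kvm_valid_regs = KVM_SYNC_GPRS;

	/* ...and mark the GPRs we modified as dirty so KVM loads them. */
	for (i = 0; i < 16; i++)
		run->s.regs.gprs[i] = gprs[i];
	run->kvm_dirty_regs = KVM_SYNC_GPRS;
}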

#define KVM_REG_S390_TODPR	(KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
#define KVM_REG_S390_EPOCHDIFF	(KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x2)
#define KVM_REG_S390_CPU_TIMER	(KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x3)
#define KVM_REG_S390_CLOCK_COMP	(KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x4)
#define KVM_REG_S390_PFTOKEN	(KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x5)
#define KVM_REG_S390_PFCOMPARE	(KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x6)
#define KVM_REG_S390_PFSELECT	(KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x7)
#define KVM_REG_S390_PP		(KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x8)
#define KVM_REG_S390_GBEA	(KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x9)
#endif

new file: tools/arch/s390/include/uapi/asm/kvm_perf.h (25 lines)
@@ -0,0 +1,25 @@
/*
 * Definitions for perf-kvm on s390
 *
 * Copyright 2014 IBM Corp.
 * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */

#ifndef __LINUX_KVM_PERF_S390_H
#define __LINUX_KVM_PERF_S390_H

#include <asm/sie.h>

#define DECODE_STR_LEN 40

#define VCPU_ID "id"

#define KVM_ENTRY_TRACE "kvm:kvm_s390_sie_enter"
#define KVM_EXIT_TRACE "kvm:kvm_s390_sie_exit"
#define KVM_EXIT_REASON "icptcode"

#endif

new file: tools/arch/s390/include/uapi/asm/sie.h (250 lines)
@@ -0,0 +1,250 @@
#ifndef _UAPI_ASM_S390_SIE_H
#define _UAPI_ASM_S390_SIE_H

#define diagnose_codes						\
	{ 0x10, "DIAG (0x10) release pages" },			\
	{ 0x44, "DIAG (0x44) time slice end" },			\
	{ 0x9c, "DIAG (0x9c) time slice end directed" },	\
	{ 0x204, "DIAG (0x204) logical-cpu utilization" },	\
	{ 0x258, "DIAG (0x258) page-reference services" },	\
	{ 0x288, "DIAG (0x288) watchdog functions" },		\
	{ 0x308, "DIAG (0x308) ipl functions" },		\
	{ 0x500, "DIAG (0x500) KVM virtio functions" },		\
	{ 0x501, "DIAG (0x501) KVM breakpoint" }

#define sigp_order_codes					\
	{ 0x01, "SIGP sense" },					\
	{ 0x02, "SIGP external call" },				\
	{ 0x03, "SIGP emergency signal" },			\
	{ 0x04, "SIGP start" },					\
	{ 0x05, "SIGP stop" },					\
	{ 0x06, "SIGP restart" },				\
	{ 0x09, "SIGP stop and store status" },			\
	{ 0x0b, "SIGP initial cpu reset" },			\
	{ 0x0c, "SIGP cpu reset" },				\
	{ 0x0d, "SIGP set prefix" },				\
	{ 0x0e, "SIGP store status at address" },		\
	{ 0x12, "SIGP set architecture" },			\
	{ 0x13, "SIGP conditional emergency signal" },		\
	{ 0x15, "SIGP sense running" },				\
	{ 0x16, "SIGP set multithreading" },			\
	{ 0x17, "SIGP store additional status at address" }

#define icpt_prog_codes						\
	{ 0x0001, "Prog Operation" },				\
	{ 0x0002, "Prog Privileged Operation" },		\
	{ 0x0003, "Prog Execute" },				\
	{ 0x0004, "Prog Protection" },				\
	{ 0x0005, "Prog Addressing" },				\
	{ 0x0006, "Prog Specification" },			\
	{ 0x0007, "Prog Data" },				\
	{ 0x0008, "Prog Fixedpoint overflow" },			\
	{ 0x0009, "Prog Fixedpoint divide" },			\
	{ 0x000A, "Prog Decimal overflow" },			\
	{ 0x000B, "Prog Decimal divide" },			\
	{ 0x000C, "Prog HFP exponent overflow" },		\
	{ 0x000D, "Prog HFP exponent underflow" },		\
	{ 0x000E, "Prog HFP significance" },			\
	{ 0x000F, "Prog HFP divide" },				\
	{ 0x0010, "Prog Segment translation" },			\
	{ 0x0011, "Prog Page translation" },			\
	{ 0x0012, "Prog Translation specification" },		\
	{ 0x0013, "Prog Special operation" },			\
	{ 0x0015, "Prog Operand" },				\
	{ 0x0016, "Prog Trace table" },				\
	{ 0x0017, "Prog ASN translation specification" },	\
	{ 0x001C, "Prog Space switch event" },			\
	{ 0x001D, "Prog HFP square root" },			\
	{ 0x001F, "Prog PC translation specification" },	\
	{ 0x0020, "Prog AFX translation" },			\
	{ 0x0021, "Prog ASX translation" },			\
	{ 0x0022, "Prog LX translation" },			\
	{ 0x0023, "Prog EX translation" },			\
	{ 0x0024, "Prog Primary authority" },			\
	{ 0x0025, "Prog Secondary authority" },			\
	{ 0x0026, "Prog LFX translation exception" },		\
	{ 0x0027, "Prog LSX translation exception" },		\
	{ 0x0028, "Prog ALET specification" },			\
	{ 0x0029, "Prog ALEN translation" },			\
	{ 0x002A, "Prog ALE sequence" },			\
	{ 0x002B, "Prog ASTE validity" },			\
	{ 0x002C, "Prog ASTE sequence" },			\
	{ 0x002D, "Prog Extended authority" },			\
	{ 0x002E, "Prog LSTE sequence" },			\
	{ 0x002F, "Prog ASTE instance" },			\
	{ 0x0030, "Prog Stack full" },				\
	{ 0x0031, "Prog Stack empty" },				\
	{ 0x0032, "Prog Stack specification" },			\
	{ 0x0033, "Prog Stack type" },				\
	{ 0x0034, "Prog Stack operation" },			\
	{ 0x0039, "Prog Region first translation" },		\
	{ 0x003A, "Prog Region second translation" },		\
	{ 0x003B, "Prog Region third translation" },		\
	{ 0x0040, "Prog Monitor event" },			\
	{ 0x0080, "Prog PER event" },				\
	{ 0x0119, "Prog Crypto operation" }

#define exit_code_ipa0(ipa0, opcode, mnemonic)		\
	{ (ipa0 << 8 | opcode), #ipa0 " " mnemonic }
#define exit_code(opcode, mnemonic)			\
	{ opcode, mnemonic }

#define icpt_insn_codes					\
	exit_code_ipa0(0x01, 0x01, "PR"),		\
	exit_code_ipa0(0x01, 0x04, "PTFF"),		\
	exit_code_ipa0(0x01, 0x07, "SCKPF"),		\
	exit_code_ipa0(0xAA, 0x00, "RINEXT"),		\
	exit_code_ipa0(0xAA, 0x01, "RION"),		\
	exit_code_ipa0(0xAA, 0x02, "TRIC"),		\
	exit_code_ipa0(0xAA, 0x03, "RIOFF"),		\
	exit_code_ipa0(0xAA, 0x04, "RIEMIT"),		\
	exit_code_ipa0(0xB2, 0x02, "STIDP"),		\
	exit_code_ipa0(0xB2, 0x04, "SCK"),		\
	exit_code_ipa0(0xB2, 0x05, "STCK"),		\
	exit_code_ipa0(0xB2, 0x06, "SCKC"),		\
	exit_code_ipa0(0xB2, 0x07, "STCKC"),		\
	exit_code_ipa0(0xB2, 0x08, "SPT"),		\
	exit_code_ipa0(0xB2, 0x09, "STPT"),		\
	exit_code_ipa0(0xB2, 0x0d, "PTLB"),		\
	exit_code_ipa0(0xB2, 0x10, "SPX"),		\
	exit_code_ipa0(0xB2, 0x11, "STPX"),		\
	exit_code_ipa0(0xB2, 0x12, "STAP"),		\
	exit_code_ipa0(0xB2, 0x14, "SIE"),		\
	exit_code_ipa0(0xB2, 0x16, "SETR"),		\
	exit_code_ipa0(0xB2, 0x17, "STETR"),		\
	exit_code_ipa0(0xB2, 0x18, "PC"),		\
	exit_code_ipa0(0xB2, 0x20, "SERVC"),		\
	exit_code_ipa0(0xB2, 0x21, "IPTE"),		\
	exit_code_ipa0(0xB2, 0x28, "PT"),		\
	exit_code_ipa0(0xB2, 0x29, "ISKE"),		\
	exit_code_ipa0(0xB2, 0x2a, "RRBE"),		\
	exit_code_ipa0(0xB2, 0x2b, "SSKE"),		\
	exit_code_ipa0(0xB2, 0x2c, "TB"),		\
	exit_code_ipa0(0xB2, 0x2e, "PGIN"),		\
	exit_code_ipa0(0xB2, 0x2f, "PGOUT"),		\
	exit_code_ipa0(0xB2, 0x30, "CSCH"),		\
	exit_code_ipa0(0xB2, 0x31, "HSCH"),		\
	exit_code_ipa0(0xB2, 0x32, "MSCH"),		\
	exit_code_ipa0(0xB2, 0x33, "SSCH"),		\
	exit_code_ipa0(0xB2, 0x34, "STSCH"),		\
	exit_code_ipa0(0xB2, 0x35, "TSCH"),		\
	exit_code_ipa0(0xB2, 0x36, "TPI"),		\
	exit_code_ipa0(0xB2, 0x37, "SAL"),		\
	exit_code_ipa0(0xB2, 0x38, "RSCH"),		\
	exit_code_ipa0(0xB2, 0x39, "STCRW"),		\
	exit_code_ipa0(0xB2, 0x3a, "STCPS"),		\
	exit_code_ipa0(0xB2, 0x3b, "RCHP"),		\
	exit_code_ipa0(0xB2, 0x3c, "SCHM"),		\
	exit_code_ipa0(0xB2, 0x40, "BAKR"),		\
	exit_code_ipa0(0xB2, 0x48, "PALB"),		\
	exit_code_ipa0(0xB2, 0x4c, "TAR"),		\
	exit_code_ipa0(0xB2, 0x50, "CSP"),		\
	exit_code_ipa0(0xB2, 0x54, "MVPG"),		\
	exit_code_ipa0(0xB2, 0x58, "BSG"),		\
	exit_code_ipa0(0xB2, 0x5a, "BSA"),		\
	exit_code_ipa0(0xB2, 0x5f, "CHSC"),		\
	exit_code_ipa0(0xB2, 0x74, "SIGA"),		\
	exit_code_ipa0(0xB2, 0x76, "XSCH"),		\
	exit_code_ipa0(0xB2, 0x78, "STCKE"),		\
	exit_code_ipa0(0xB2, 0x7c, "STCKF"),		\
	exit_code_ipa0(0xB2, 0x7d, "STSI"),		\
	exit_code_ipa0(0xB2, 0xb0, "STFLE"),		\
	exit_code_ipa0(0xB2, 0xb1, "STFL"),		\
	exit_code_ipa0(0xB2, 0xb2, "LPSWE"),		\
	exit_code_ipa0(0xB2, 0xf8, "TEND"),		\
	exit_code_ipa0(0xB2, 0xfc, "TABORT"),		\
	exit_code_ipa0(0xB9, 0x1e, "KMAC"),		\
	exit_code_ipa0(0xB9, 0x28, "PCKMO"),		\
	exit_code_ipa0(0xB9, 0x2a, "KMF"),		\
	exit_code_ipa0(0xB9, 0x2b, "KMO"),		\
	exit_code_ipa0(0xB9, 0x2d, "KMCTR"),		\
	exit_code_ipa0(0xB9, 0x2e, "KM"),		\
	exit_code_ipa0(0xB9, 0x2f, "KMC"),		\
	exit_code_ipa0(0xB9, 0x3e, "KIMD"),		\
	exit_code_ipa0(0xB9, 0x3f, "KLMD"),		\
	exit_code_ipa0(0xB9, 0x8a, "CSPG"),		\
	exit_code_ipa0(0xB9, 0x8d, "EPSW"),		\
	exit_code_ipa0(0xB9, 0x8e, "IDTE"),		\
	exit_code_ipa0(0xB9, 0x8f, "CRDTE"),		\
	exit_code_ipa0(0xB9, 0x9c, "EQBS"),		\
	exit_code_ipa0(0xB9, 0xa2, "PTF"),		\
	exit_code_ipa0(0xB9, 0xab, "ESSA"),		\
	exit_code_ipa0(0xB9, 0xae, "RRBM"),		\
	exit_code_ipa0(0xB9, 0xaf, "PFMF"),		\
	exit_code_ipa0(0xE3, 0x03, "LRAG"),		\
	exit_code_ipa0(0xE3, 0x13, "LRAY"),		\
	exit_code_ipa0(0xE3, 0x25, "NTSTG"),		\
	exit_code_ipa0(0xE5, 0x00, "LASP"),		\
	exit_code_ipa0(0xE5, 0x01, "TPROT"),		\
	exit_code_ipa0(0xE5, 0x60, "TBEGIN"),		\
	exit_code_ipa0(0xE5, 0x61, "TBEGINC"),		\
	exit_code_ipa0(0xEB, 0x25, "STCTG"),		\
	exit_code_ipa0(0xEB, 0x2f, "LCTLG"),		\
	exit_code_ipa0(0xEB, 0x60, "LRIC"),		\
	exit_code_ipa0(0xEB, 0x61, "STRIC"),		\
	exit_code_ipa0(0xEB, 0x62, "MRIC"),		\
	exit_code_ipa0(0xEB, 0x8a, "SQBS"),		\
	exit_code_ipa0(0xC8, 0x01, "ECTG"),		\
	exit_code(0x0a, "SVC"),				\
	exit_code(0x80, "SSM"),				\
	exit_code(0x82, "LPSW"),			\
	exit_code(0x83, "DIAG"),			\
	exit_code(0xae, "SIGP"),			\
	exit_code(0xac, "STNSM"),			\
	exit_code(0xad, "STOSM"),			\
	exit_code(0xb1, "LRA"),				\
	exit_code(0xb6, "STCTL"),			\
	exit_code(0xb7, "LCTL"),			\
	exit_code(0xee, "PLO")

#define sie_intercept_code					\
	{ 0x00, "Host interruption" },				\
	{ 0x04, "Instruction" },				\
	{ 0x08, "Program interruption" },			\
	{ 0x0c, "Instruction and program interruption" },	\
	{ 0x10, "External request" },				\
	{ 0x14, "External interruption" },			\
	{ 0x18, "I/O request" },				\
	{ 0x1c, "Wait state" },					\
	{ 0x20, "Validity" },					\
	{ 0x28, "Stop request" },				\
	{ 0x2c, "Operation exception" },			\
	{ 0x38, "Partial-execution" },				\
	{ 0x3c, "I/O interruption" },				\
	{ 0x40, "I/O instruction" },				\
	{ 0x48, "Timing subset" }

/*
 * This is the simple decoder for interceptable instructions.
 *
 * It will be used as a userspace interface, and it can be used in places
 * that do not allow the use of general decoder functions, such as trace
 * event declarations.
 *
 * Some userspace tools may want to parse this code, and would be confused
 * by switch(), if() and other statements, but they can understand the
 * conditional operator.
 */
#define INSN_DECODE_IPA0(ipa0, insn, rshift, mask)	\
	(insn >> 56) == (ipa0) ?			\
		((ipa0 << 8) | ((insn >> rshift) & mask)) :

#define INSN_DECODE(insn) (insn >> 56)

/*
 * The macro icpt_insn_decoder() takes an intercepted instruction
 * and returns a key, which can be used to find a mnemonic name
 * of the instruction in the icpt_insn_codes table.
 */
#define icpt_insn_decoder(insn) (		\
	INSN_DECODE_IPA0(0x01, insn, 48, 0xff)	\
	INSN_DECODE_IPA0(0xaa, insn, 48, 0x0f)	\
	INSN_DECODE_IPA0(0xb2, insn, 48, 0xff)	\
	INSN_DECODE_IPA0(0xb9, insn, 48, 0xff)	\
	INSN_DECODE_IPA0(0xe3, insn, 48, 0xff)	\
	INSN_DECODE_IPA0(0xe5, insn, 48, 0xff)	\
	INSN_DECODE_IPA0(0xeb, insn, 16, 0xff)	\
	INSN_DECODE_IPA0(0xc8, insn, 48, 0x0f)	\
	INSN_DECODE(insn))

#endif /* _UAPI_ASM_S390_SIE_H */
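
Because the tables above are plain initializer lists, userspace can reuse them directly; a hedged sketch of a mnemonic lookup built on icpt_insn_decoder() (the insn_name struct is illustrative, not part of the header):

#include <stddef.h>

struct insn_name {
	unsigned long long code;
	const char *name;
};

/* The trace-style table expands to valid C initializers. */
static const struct insn_name insn_names[] = { icpt_insn_codes };

static const char *icpt_insn_name(unsigned long long insn)
{
	unsigned long long key = icpt_insn_decoder(insn);
	size_t i;

	for (i = 0; i < sizeof(insn_names) / sizeof(insn_names[0]); i++)
		if (insn_names[i].code == key)
			return insn_names[i].name;
	return "unknown";
}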

new file: tools/arch/score/include/uapi/asm/bitsperlong.h (6 lines)
@@ -0,0 +1,6 @@
#ifndef _ASM_SCORE_BITSPERLONG_H
#define _ASM_SCORE_BITSPERLONG_H

#include <asm-generic/bitsperlong.h>

#endif /* _ASM_SCORE_BITSPERLONG_H */

new file: tools/arch/sparc/include/uapi/asm/bitsperlong.h (12 lines)
@@ -0,0 +1,12 @@
#ifndef __ASM_ALPHA_BITSPERLONG_H
#define __ASM_ALPHA_BITSPERLONG_H

#if defined(__sparc__) && defined(__arch64__)
#define __BITS_PER_LONG 64
#else
#define __BITS_PER_LONG 32
#endif

#include <asm-generic/bitsperlong.h>

#endif /* __ASM_ALPHA_BITSPERLONG_H */

new file: tools/arch/tile/include/uapi/asm/bitsperlong.h (26 lines)
@@ -0,0 +1,26 @@
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_BITSPERLONG_H
#define _ASM_TILE_BITSPERLONG_H

#ifdef __LP64__
# define __BITS_PER_LONG 64
#else
# define __BITS_PER_LONG 32
#endif

#include <asm-generic/bitsperlong.h>

#endif /* _ASM_TILE_BITSPERLONG_H */

new file: tools/arch/x86/include/asm/cpufeatures.h (316 lines)
@@ -0,0 +1,316 @@
#ifndef _ASM_X86_CPUFEATURES_H
#define _ASM_X86_CPUFEATURES_H

#ifndef _ASM_X86_REQUIRED_FEATURES_H
#include <asm/required-features.h>
#endif

#ifndef _ASM_X86_DISABLED_FEATURES_H
#include <asm/disabled-features.h>
#endif

/*
 * Defines x86 CPU feature bits
 */
#define NCAPINTS	18	/* N 32-bit words worth of info */
#define NBUGINTS	1	/* N 32-bit bug flags */

/*
 * Note: If the comment begins with a quoted string, that string is used
 * in /proc/cpuinfo instead of the macro name.  If the string is "",
 * this feature bit is not displayed in /proc/cpuinfo at all.
 */

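Each feature value encodes word * 32 + bit; recovering the pair is plain arithmetic, as in this illustrative snippet (X86_FEATURE_AVX2 carries the same value as in the word-9 table below):

#include <stdio.h>

#define X86_FEATURE_AVX2 ( 9*32+ 5)	/* same value as in the table below */

int main(void)
{
	unsigned int feature = X86_FEATURE_AVX2;

	/* word indexes one of the NCAPINTS 32-bit capability words;
	 * bit is the position inside that word. */
	printf("word %u, bit %u\n", feature / 32, feature % 32);
	return 0;
}
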
/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
#define X86_FEATURE_FPU		( 0*32+ 0) /* Onboard FPU */
#define X86_FEATURE_VME		( 0*32+ 1) /* Virtual Mode Extensions */
#define X86_FEATURE_DE		( 0*32+ 2) /* Debugging Extensions */
#define X86_FEATURE_PSE		( 0*32+ 3) /* Page Size Extensions */
#define X86_FEATURE_TSC		( 0*32+ 4) /* Time Stamp Counter */
#define X86_FEATURE_MSR		( 0*32+ 5) /* Model-Specific Registers */
#define X86_FEATURE_PAE		( 0*32+ 6) /* Physical Address Extensions */
#define X86_FEATURE_MCE		( 0*32+ 7) /* Machine Check Exception */
#define X86_FEATURE_CX8		( 0*32+ 8) /* CMPXCHG8 instruction */
#define X86_FEATURE_APIC	( 0*32+ 9) /* Onboard APIC */
#define X86_FEATURE_SEP		( 0*32+11) /* SYSENTER/SYSEXIT */
#define X86_FEATURE_MTRR	( 0*32+12) /* Memory Type Range Registers */
#define X86_FEATURE_PGE		( 0*32+13) /* Page Global Enable */
#define X86_FEATURE_MCA		( 0*32+14) /* Machine Check Architecture */
#define X86_FEATURE_CMOV	( 0*32+15) /* CMOV instructions */
					   /* (plus FCMOVcc, FCOMI with FPU) */
#define X86_FEATURE_PAT		( 0*32+16) /* Page Attribute Table */
#define X86_FEATURE_PSE36	( 0*32+17) /* 36-bit PSEs */
#define X86_FEATURE_PN		( 0*32+18) /* Processor serial number */
#define X86_FEATURE_CLFLUSH	( 0*32+19) /* CLFLUSH instruction */
#define X86_FEATURE_DS		( 0*32+21) /* "dts" Debug Store */
#define X86_FEATURE_ACPI	( 0*32+22) /* ACPI via MSR */
#define X86_FEATURE_MMX		( 0*32+23) /* Multimedia Extensions */
#define X86_FEATURE_FXSR	( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
#define X86_FEATURE_XMM		( 0*32+25) /* "sse" */
#define X86_FEATURE_XMM2	( 0*32+26) /* "sse2" */
#define X86_FEATURE_SELFSNOOP	( 0*32+27) /* "ss" CPU self snoop */
#define X86_FEATURE_HT		( 0*32+28) /* Hyper-Threading */
#define X86_FEATURE_ACC		( 0*32+29) /* "tm" Automatic clock control */
#define X86_FEATURE_IA64	( 0*32+30) /* IA-64 processor */
#define X86_FEATURE_PBE		( 0*32+31) /* Pending Break Enable */

/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
/* Don't duplicate feature flags which are redundant with Intel! */
#define X86_FEATURE_SYSCALL	( 1*32+11) /* SYSCALL/SYSRET */
#define X86_FEATURE_MP		( 1*32+19) /* MP Capable. */
#define X86_FEATURE_NX		( 1*32+20) /* Execute Disable */
#define X86_FEATURE_MMXEXT	( 1*32+22) /* AMD MMX extensions */
#define X86_FEATURE_FXSR_OPT	( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
#define X86_FEATURE_GBPAGES	( 1*32+26) /* "pdpe1gb" GB pages */
#define X86_FEATURE_RDTSCP	( 1*32+27) /* RDTSCP */
#define X86_FEATURE_LM		( 1*32+29) /* Long Mode (x86-64) */
#define X86_FEATURE_3DNOWEXT	( 1*32+30) /* AMD 3DNow! extensions */
#define X86_FEATURE_3DNOW	( 1*32+31) /* 3DNow! */

/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
#define X86_FEATURE_RECOVERY	( 2*32+ 0) /* CPU in recovery mode */
#define X86_FEATURE_LONGRUN	( 2*32+ 1) /* Longrun power control */
#define X86_FEATURE_LRTI	( 2*32+ 3) /* LongRun table interface */

/* Other features, Linux-defined mapping, word 3 */
/* This range is used for feature bits which conflict or are synthesized */
#define X86_FEATURE_CXMMX	( 3*32+ 0) /* Cyrix MMX extensions */
#define X86_FEATURE_K6_MTRR	( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
#define X86_FEATURE_CYRIX_ARR	( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
#define X86_FEATURE_CENTAUR_MCR	( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
/* cpu types for specific tunings: */
#define X86_FEATURE_K8		( 3*32+ 4) /* "" Opteron, Athlon64 */
#define X86_FEATURE_K7		( 3*32+ 5) /* "" Athlon */
#define X86_FEATURE_P3		( 3*32+ 6) /* "" P3 */
#define X86_FEATURE_P4		( 3*32+ 7) /* "" P4 */
#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
#define X86_FEATURE_UP		( 3*32+ 9) /* smp kernel running on up */
#define X86_FEATURE_ART		( 3*32+10) /* Platform has always running timer (ART) */
#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
#define X86_FEATURE_PEBS	( 3*32+12) /* Precise-Event Based Sampling */
#define X86_FEATURE_BTS		( 3*32+13) /* Branch Trace Store */
#define X86_FEATURE_SYSCALL32	( 3*32+14) /* "" syscall in ia32 userspace */
#define X86_FEATURE_SYSENTER32	( 3*32+15) /* "" sysenter in ia32 userspace */
#define X86_FEATURE_REP_GOOD	( 3*32+16) /* rep microcode works well */
#define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */
#define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */
#define X86_FEATURE_ACC_POWER	( 3*32+19) /* AMD Accumulated Power Mechanism */
#define X86_FEATURE_NOPL	( 3*32+20) /* The NOPL (0F 1F) instructions */
#define X86_FEATURE_ALWAYS	( 3*32+21) /* "" Always-present feature */
#define X86_FEATURE_XTOPOLOGY	( 3*32+22) /* cpu topology enum extensions */
#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
#define X86_FEATURE_NONSTOP_TSC	( 3*32+24) /* TSC does not stop in C states */
/* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */
#define X86_FEATURE_EXTD_APICID	( 3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM	( 3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF	( 3*32+28) /* APERFMPERF */
#define X86_FEATURE_EAGER_FPU	( 3*32+29) /* "eagerfpu" Non lazy FPU restore */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
#define X86_FEATURE_MCE_RECOVERY ( 3*32+31) /* cpu has recoverable machine checks */

/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
#define X86_FEATURE_XMM3	( 4*32+ 0) /* "pni" SSE-3 */
#define X86_FEATURE_PCLMULQDQ	( 4*32+ 1) /* PCLMULQDQ instruction */
#define X86_FEATURE_DTES64	( 4*32+ 2) /* 64-bit Debug Store */
#define X86_FEATURE_MWAIT	( 4*32+ 3) /* "monitor" Monitor/Mwait support */
#define X86_FEATURE_DSCPL	( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */
#define X86_FEATURE_VMX		( 4*32+ 5) /* Hardware virtualization */
#define X86_FEATURE_SMX		( 4*32+ 6) /* Safer mode */
#define X86_FEATURE_EST		( 4*32+ 7) /* Enhanced SpeedStep */
#define X86_FEATURE_TM2		( 4*32+ 8) /* Thermal Monitor 2 */
#define X86_FEATURE_SSSE3	( 4*32+ 9) /* Supplemental SSE-3 */
#define X86_FEATURE_CID		( 4*32+10) /* Context ID */
#define X86_FEATURE_SDBG	( 4*32+11) /* Silicon Debug */
#define X86_FEATURE_FMA		( 4*32+12) /* Fused multiply-add */
#define X86_FEATURE_CX16	( 4*32+13) /* CMPXCHG16B */
#define X86_FEATURE_XTPR	( 4*32+14) /* Send Task Priority Messages */
#define X86_FEATURE_PDCM	( 4*32+15) /* Performance Capabilities */
#define X86_FEATURE_PCID	( 4*32+17) /* Process Context Identifiers */
#define X86_FEATURE_DCA		( 4*32+18) /* Direct Cache Access */
#define X86_FEATURE_XMM4_1	( 4*32+19) /* "sse4_1" SSE-4.1 */
#define X86_FEATURE_XMM4_2	( 4*32+20) /* "sse4_2" SSE-4.2 */
#define X86_FEATURE_X2APIC	( 4*32+21) /* x2APIC */
#define X86_FEATURE_MOVBE	( 4*32+22) /* MOVBE instruction */
#define X86_FEATURE_POPCNT	( 4*32+23) /* POPCNT instruction */
#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */
#define X86_FEATURE_AES		( 4*32+25) /* AES instructions */
#define X86_FEATURE_XSAVE	( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
#define X86_FEATURE_OSXSAVE	( 4*32+27) /* "" XSAVE enabled in the OS */
#define X86_FEATURE_AVX		( 4*32+28) /* Advanced Vector Extensions */
#define X86_FEATURE_F16C	( 4*32+29) /* 16-bit fp conversions */
#define X86_FEATURE_RDRAND	( 4*32+30) /* The RDRAND instruction */
#define X86_FEATURE_HYPERVISOR	( 4*32+31) /* Running on a hypervisor */

/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
#define X86_FEATURE_XSTORE	( 5*32+ 2) /* "rng" RNG present (xstore) */
#define X86_FEATURE_XSTORE_EN	( 5*32+ 3) /* "rng_en" RNG enabled */
#define X86_FEATURE_XCRYPT	( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
#define X86_FEATURE_XCRYPT_EN	( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
#define X86_FEATURE_ACE2	( 5*32+ 8) /* Advanced Cryptography Engine v2 */
#define X86_FEATURE_ACE2_EN	( 5*32+ 9) /* ACE v2 enabled */
#define X86_FEATURE_PHE		( 5*32+10) /* PadLock Hash Engine */
#define X86_FEATURE_PHE_EN	( 5*32+11) /* PHE enabled */
#define X86_FEATURE_PMM		( 5*32+12) /* PadLock Montgomery Multiplier */
#define X86_FEATURE_PMM_EN	( 5*32+13) /* PMM enabled */

/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
#define X86_FEATURE_LAHF_LM	( 6*32+ 0) /* LAHF/SAHF in long mode */
#define X86_FEATURE_CMP_LEGACY	( 6*32+ 1) /* If yes HyperThreading not valid */
#define X86_FEATURE_SVM		( 6*32+ 2) /* Secure virtual machine */
#define X86_FEATURE_EXTAPIC	( 6*32+ 3) /* Extended APIC space */
#define X86_FEATURE_CR8_LEGACY	( 6*32+ 4) /* CR8 in 32-bit mode */
#define X86_FEATURE_ABM		( 6*32+ 5) /* Advanced bit manipulation */
#define X86_FEATURE_SSE4A	( 6*32+ 6) /* SSE-4A */
#define X86_FEATURE_MISALIGNSSE	( 6*32+ 7) /* Misaligned SSE mode */
#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
#define X86_FEATURE_OSVW	( 6*32+ 9) /* OS Visible Workaround */
#define X86_FEATURE_IBS		( 6*32+10) /* Instruction Based Sampling */
#define X86_FEATURE_XOP		( 6*32+11) /* extended AVX instructions */
#define X86_FEATURE_SKINIT	( 6*32+12) /* SKINIT/STGI instructions */
#define X86_FEATURE_WDT		( 6*32+13) /* Watchdog timer */
#define X86_FEATURE_LWP		( 6*32+15) /* Light Weight Profiling */
#define X86_FEATURE_FMA4	( 6*32+16) /* 4 operands MAC instructions */
#define X86_FEATURE_TCE		( 6*32+17) /* translation cache extension */
#define X86_FEATURE_NODEID_MSR	( 6*32+19) /* NodeId MSR */
#define X86_FEATURE_TBM		( 6*32+21) /* trailing bit manipulations */
#define X86_FEATURE_TOPOEXT	( 6*32+22) /* topology extensions CPUID leafs */
#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */
#define X86_FEATURE_PERFCTR_NB	( 6*32+24) /* NB performance counter extensions */
#define X86_FEATURE_BPEXT	( 6*32+26) /* data breakpoint extension */
#define X86_FEATURE_PTSC	( 6*32+27) /* performance time-stamp counter */
#define X86_FEATURE_PERFCTR_L2	( 6*32+28) /* L2 performance counter extensions */
#define X86_FEATURE_MWAITX	( 6*32+29) /* MWAIT extension (MONITORX/MWAITX) */

/*
 * Auxiliary flags: Linux defined - For features scattered in various
 * CPUID levels like 0x6, 0xA etc, word 7.
 *
 * Reuse free bits when adding new feature flags!
 */

#define X86_FEATURE_CPB		( 7*32+ 2) /* AMD Core Performance Boost */
#define X86_FEATURE_EPB		( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */

#define X86_FEATURE_HW_PSTATE	( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */

#define X86_FEATURE_INTEL_PT	( 7*32+15) /* Intel Processor Trace */

/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW	( 8*32+ 0) /* Intel TPR Shadow */
#define X86_FEATURE_VNMI	( 8*32+ 1) /* Intel Virtual NMI */
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */
#define X86_FEATURE_EPT		( 8*32+ 3) /* Intel Extended Page Table */
#define X86_FEATURE_VPID	( 8*32+ 4) /* Intel Virtual Processor ID */

#define X86_FEATURE_VMMCALL	( 8*32+15) /* Prefer vmmcall to vmcall */
#define X86_FEATURE_XENPV	( 8*32+16) /* "" Xen paravirtual guest */


/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE	( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
#define X86_FEATURE_TSC_ADJUST	( 9*32+ 1) /* TSC adjustment MSR 0x3b */
#define X86_FEATURE_BMI1	( 9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE		( 9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2	( 9*32+ 5) /* AVX2 instructions */
#define X86_FEATURE_SMEP	( 9*32+ 7) /* Supervisor Mode Execution Protection */
#define X86_FEATURE_BMI2	( 9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS	( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */
#define X86_FEATURE_INVPCID	( 9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM		( 9*32+11) /* Restricted Transactional Memory */
#define X86_FEATURE_CQM		( 9*32+12) /* Cache QoS Monitoring */
#define X86_FEATURE_MPX		( 9*32+14) /* Memory Protection Extension */
#define X86_FEATURE_AVX512F	( 9*32+16) /* AVX-512 Foundation */
#define X86_FEATURE_AVX512DQ	( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
#define X86_FEATURE_RDSEED	( 9*32+18) /* The RDSEED instruction */
#define X86_FEATURE_ADX		( 9*32+19) /* The ADCX and ADOX instructions */
#define X86_FEATURE_SMAP	( 9*32+20) /* Supervisor Mode Access Prevention */
#define X86_FEATURE_PCOMMIT	( 9*32+22) /* PCOMMIT instruction */
#define X86_FEATURE_CLFLUSHOPT	( 9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB	( 9*32+24) /* CLWB instruction */
#define X86_FEATURE_AVX512PF	( 9*32+26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER	( 9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD	( 9*32+28) /* AVX-512 Conflict Detection */
#define X86_FEATURE_SHA_NI	( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
#define X86_FEATURE_AVX512BW	( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
#define X86_FEATURE_AVX512VL	( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */

/* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */
#define X86_FEATURE_XSAVEOPT	(10*32+ 0) /* XSAVEOPT */
#define X86_FEATURE_XSAVEC	(10*32+ 1) /* XSAVEC */
#define X86_FEATURE_XGETBV1	(10*32+ 2) /* XGETBV with ECX = 1 */
#define X86_FEATURE_XSAVES	(10*32+ 3) /* XSAVES/XRSTORS */

/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:0 (edx), word 11 */
#define X86_FEATURE_CQM_LLC	(11*32+ 1) /* LLC QoS if 1 */

/* Intel-defined CPU QoS Sub-leaf, CPUID level 0x0000000F:1 (edx), word 12 */
#define X86_FEATURE_CQM_OCCUP_LLC (12*32+ 0) /* LLC occupancy monitoring if 1 */
#define X86_FEATURE_CQM_MBM_TOTAL (12*32+ 1) /* LLC Total MBM monitoring */
#define X86_FEATURE_CQM_MBM_LOCAL (12*32+ 2) /* LLC Local MBM monitoring */

/* AMD-defined CPU features, CPUID level 0x80000008 (ebx), word 13 */
#define X86_FEATURE_CLZERO	(13*32+ 0) /* CLZERO instruction */
#define X86_FEATURE_IRPERF	(13*32+ 1) /* Instructions Retired Count */

/* Thermal and Power Management Leaf, CPUID level 0x00000006 (eax), word 14 */
#define X86_FEATURE_DTHERM	(14*32+ 0) /* Digital Thermal Sensor */
#define X86_FEATURE_IDA		(14*32+ 1) /* Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT	(14*32+ 2) /* Always Running APIC Timer */
#define X86_FEATURE_PLN		(14*32+ 4) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS		(14*32+ 6) /* Intel Package Thermal Status */
#define X86_FEATURE_HWP		(14*32+ 7) /* Intel Hardware P-states */
#define X86_FEATURE_HWP_NOTIFY	(14*32+ 8) /* HWP Notification */
#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
#define X86_FEATURE_HWP_EPP	(14*32+10) /* HWP Energy Perf. Preference */
#define X86_FEATURE_HWP_PKG_REQ	(14*32+11) /* HWP Package Level Request */

/* AMD SVM Feature Identification, CPUID level 0x8000000a (edx), word 15 */
#define X86_FEATURE_NPT		(15*32+ 0) /* Nested Page Table support */
#define X86_FEATURE_LBRV	(15*32+ 1) /* LBR Virtualization support */
#define X86_FEATURE_SVML	(15*32+ 2) /* "svm_lock" SVM locking MSR */
#define X86_FEATURE_NRIPS	(15*32+ 3) /* "nrip_save" SVM next_rip save */
#define X86_FEATURE_TSCRATEMSR	(15*32+ 4) /* "tsc_scale" TSC scaling support */
#define X86_FEATURE_VMCBCLEAN	(15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
#define X86_FEATURE_FLUSHBYASID	(15*32+ 6) /* flush-by-ASID support */
#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
#define X86_FEATURE_PAUSEFILTER	(15*32+10) /* filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD	(15*32+12) /* pause filter threshold */
#define X86_FEATURE_AVIC	(15*32+13) /* Virtual Interrupt Controller */

/* Intel-defined CPU features, CPUID level 0x00000007:0 (ecx), word 16 */
|
||||
#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
|
||||
#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
|
||||
|
||||
/* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */
|
||||
#define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */
|
||||
#define X86_FEATURE_SUCCOR (17*32+1) /* Uncorrectable error containment and recovery */
|
||||
#define X86_FEATURE_SMCA (17*32+3) /* Scalable MCA */
|
||||
|
||||
/*
|
||||
* BUG word(s)
|
||||
*/
|
||||
#define X86_BUG(x) (NCAPINTS*32 + (x))
|
||||
|
||||
#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
|
||||
#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
|
||||
#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
|
||||
#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
|
||||
#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
|
||||
#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
|
||||
#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
|
||||
#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
|
||||
#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
|
||||
#define X86_BUG_NULL_SEG X86_BUG(9) /* Nulling a selector preserves the base */
|
||||
#define X86_BUG_SWAPGS_FENCE X86_BUG(10) /* SWAPGS without input dep on GS */
|
||||
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
/*
|
||||
* 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional
|
||||
* to avoid confusion.
|
||||
*/
|
||||
#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
|
||||
#endif
|
||||
|
||||
#endif /* _ASM_X86_CPUFEATURES_H */
|
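Every X86_FEATURE_* value above packs a 32-bit feature word index and a bit position into one small integer via the (word*32+bit) encoding. A minimal stand-alone sketch of how such a value splits back apart (the program below is illustrative and not part of the header; the AVX2 value is copied from it):

#include <stdio.h>

/* same encoding scheme as the header above; value copied for illustration */
#define X86_FEATURE_AVX2 (9*32 + 5)

int main(void)
{
	unsigned int word = X86_FEATURE_AVX2 / 32;	/* CPUID feature word: 9 */
	unsigned int bit  = X86_FEATURE_AVX2 % 32;	/* bit within that word: 5 */

	printf("AVX2 lives in feature word %u, bit %u\n", word, bit);
	return 0;
}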
tools/arch/x86/include/asm/disabled-features.h (new file, 60 lines)
@@ -0,0 +1,60 @@
#ifndef _ASM_X86_DISABLED_FEATURES_H
#define _ASM_X86_DISABLED_FEATURES_H

/* These features, although they might be available in a CPU
 * will not be used because the compile options to support
 * them are not present.
 *
 * This code allows them to be checked and disabled at
 * compile time without an explicit #ifdef. Use
 * cpu_feature_enabled().
 */

#ifdef CONFIG_X86_INTEL_MPX
# define DISABLE_MPX 0
#else
# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31))
#endif

#ifdef CONFIG_X86_64
# define DISABLE_VME (1<<(X86_FEATURE_VME & 31))
# define DISABLE_K6_MTRR (1<<(X86_FEATURE_K6_MTRR & 31))
# define DISABLE_CYRIX_ARR (1<<(X86_FEATURE_CYRIX_ARR & 31))
# define DISABLE_CENTAUR_MCR (1<<(X86_FEATURE_CENTAUR_MCR & 31))
#else
# define DISABLE_VME 0
# define DISABLE_K6_MTRR 0
# define DISABLE_CYRIX_ARR 0
# define DISABLE_CENTAUR_MCR 0
#endif /* CONFIG_X86_64 */

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
# define DISABLE_PKU 0
# define DISABLE_OSPKE 0
#else
# define DISABLE_PKU (1<<(X86_FEATURE_PKU & 31))
# define DISABLE_OSPKE (1<<(X86_FEATURE_OSPKE & 31))
#endif /* CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS */

/*
 * Make sure to add features to the correct mask
 */
#define DISABLED_MASK0 (DISABLE_VME)
#define DISABLED_MASK1 0
#define DISABLED_MASK2 0
#define DISABLED_MASK3 (DISABLE_CYRIX_ARR|DISABLE_CENTAUR_MCR|DISABLE_K6_MTRR)
#define DISABLED_MASK4 0
#define DISABLED_MASK5 0
#define DISABLED_MASK6 0
#define DISABLED_MASK7 0
#define DISABLED_MASK8 0
#define DISABLED_MASK9 (DISABLE_MPX)
#define DISABLED_MASK10 0
#define DISABLED_MASK11 0
#define DISABLED_MASK12 0
#define DISABLED_MASK13 0
#define DISABLED_MASK14 0
#define DISABLED_MASK15 0
#define DISABLED_MASK16 (DISABLE_PKU|DISABLE_OSPKE)

#endif /* _ASM_X86_DISABLED_FEATURES_H */
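Because each DISABLED_MASKn word is a compile-time constant, a feature test masked against it folds to constant false when the config option is off, and the compiler drops the dependent code entirely. A hedged sketch of that idea (the function and parameter names below are invented for illustration; the kernel's real entry point is cpu_feature_enabled()):

#include <stdbool.h>

#define DISABLED_MASK9 (1u << 14)	/* e.g. MPX = 9*32+14 with CONFIG_X86_INTEL_MPX=n */

/* illustrative reduction of the cpu_feature_enabled() idea for word 9 */
static inline bool feature_enabled_word9(unsigned int bit, unsigned int cpuid_word9)
{
	if (DISABLED_MASK9 & (1u << bit))
		return false;			/* constant-folded: feature code is dead */
	return cpuid_word9 & (1u << bit);	/* otherwise test the runtime CPUID bit */
}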
tools/arch/x86/include/asm/required-features.h (new file, 103 lines)
@@ -0,0 +1,103 @@
#ifndef _ASM_X86_REQUIRED_FEATURES_H
#define _ASM_X86_REQUIRED_FEATURES_H

/* Define minimum CPUID feature set for kernel. These bits are checked
   really early to actually display a visible error message before the
   kernel dies. Make sure to assign features to the proper mask!

   Some requirements that are not in CPUID yet are also in the
   CONFIG_X86_MINIMUM_CPU_FAMILY which is checked too.

   The real information is in arch/x86/Kconfig.cpu, this just converts
   the CONFIGs into a bitmask */

#ifndef CONFIG_MATH_EMULATION
# define NEED_FPU (1<<(X86_FEATURE_FPU & 31))
#else
# define NEED_FPU 0
#endif

#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
# define NEED_PAE (1<<(X86_FEATURE_PAE & 31))
#else
# define NEED_PAE 0
#endif

#ifdef CONFIG_X86_CMPXCHG64
# define NEED_CX8 (1<<(X86_FEATURE_CX8 & 31))
#else
# define NEED_CX8 0
#endif

#if defined(CONFIG_X86_CMOV) || defined(CONFIG_X86_64)
# define NEED_CMOV (1<<(X86_FEATURE_CMOV & 31))
#else
# define NEED_CMOV 0
#endif

#ifdef CONFIG_X86_USE_3DNOW
# define NEED_3DNOW (1<<(X86_FEATURE_3DNOW & 31))
#else
# define NEED_3DNOW 0
#endif

#if defined(CONFIG_X86_P6_NOP) || defined(CONFIG_X86_64)
# define NEED_NOPL (1<<(X86_FEATURE_NOPL & 31))
#else
# define NEED_NOPL 0
#endif

#ifdef CONFIG_MATOM
# define NEED_MOVBE (1<<(X86_FEATURE_MOVBE & 31))
#else
# define NEED_MOVBE 0
#endif

#ifdef CONFIG_X86_64
#ifdef CONFIG_PARAVIRT
/* Paravirtualized systems may not have PSE or PGE available */
#define NEED_PSE 0
#define NEED_PGE 0
#else
#define NEED_PSE (1<<(X86_FEATURE_PSE & 31))
#define NEED_PGE (1<<(X86_FEATURE_PGE & 31))
#endif
#define NEED_MSR (1<<(X86_FEATURE_MSR & 31))
#define NEED_FXSR (1<<(X86_FEATURE_FXSR & 31))
#define NEED_XMM (1<<(X86_FEATURE_XMM & 31))
#define NEED_XMM2 (1<<(X86_FEATURE_XMM2 & 31))
#define NEED_LM (1<<(X86_FEATURE_LM & 31))
#else
#define NEED_PSE 0
#define NEED_MSR 0
#define NEED_PGE 0
#define NEED_FXSR 0
#define NEED_XMM 0
#define NEED_XMM2 0
#define NEED_LM 0
#endif

#define REQUIRED_MASK0 (NEED_FPU|NEED_PSE|NEED_MSR|NEED_PAE|\
			NEED_CX8|NEED_PGE|NEED_FXSR|NEED_CMOV|\
			NEED_XMM|NEED_XMM2)
#define SSE_MASK (NEED_XMM|NEED_XMM2)

#define REQUIRED_MASK1 (NEED_LM|NEED_3DNOW)

#define REQUIRED_MASK2 0
#define REQUIRED_MASK3 (NEED_NOPL)
#define REQUIRED_MASK4 (NEED_MOVBE)
#define REQUIRED_MASK5 0
#define REQUIRED_MASK6 0
#define REQUIRED_MASK7 0
#define REQUIRED_MASK8 0
#define REQUIRED_MASK9 0
#define REQUIRED_MASK10 0
#define REQUIRED_MASK11 0
#define REQUIRED_MASK12 0
#define REQUIRED_MASK13 0
#define REQUIRED_MASK14 0
#define REQUIRED_MASK15 0
#define REQUIRED_MASK16 0

#endif /* _ASM_X86_REQUIRED_FEATURES_H */
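How the REQUIRED_MASKn words are meant to be consumed, as a hedged sketch (the kernel's actual check lives in its early boot code; the function below is invented for illustration and assumes REQUIRED_MASK0 from the header above is in scope):

/* every required bit must be present in CPUID leaf 1 EDX (feature word 0) */
static int cpu_is_supported(unsigned int cpuid_1_edx)
{
	return (cpuid_1_edx & REQUIRED_MASK0) == REQUIRED_MASK0;
}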
tools/arch/x86/include/asm/unistd_32.h (new file, 12 lines)
@@ -0,0 +1,12 @@
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 336
#endif
#ifndef __NR_futex
# define __NR_futex 240
#endif
#ifndef __NR_gettid
# define __NR_gettid 224
#endif
#ifndef __NR_getcpu
# define __NR_getcpu 318
#endif
tools/arch/x86/include/asm/unistd_64.h (new file, 12 lines)
@@ -0,0 +1,12 @@
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
#ifndef __NR_futex
# define __NR_futex 202
#endif
#ifndef __NR_gettid
# define __NR_gettid 186
#endif
#ifndef __NR_getcpu
# define __NR_getcpu 309
#endif
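These fallback syscall numbers exist because perf_event_open() has no libc wrapper; tools invoke it through syscall(2) directly. A small self-contained example of the call these definitions enable (opening a hardware cycles counter on the calling task):

#include <unistd.h>
#include <string.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int perf_event_open_cycles(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;

	/* pid = 0 (this task), cpu = -1 (any CPU), group_fd = -1, flags = 0 */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}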
tools/arch/x86/include/uapi/asm/bitsperlong.h (new file, 12 lines)
@@ -0,0 +1,12 @@
#ifndef __ASM_X86_BITSPERLONG_H
#define __ASM_X86_BITSPERLONG_H

#if defined(__x86_64__) && !defined(__ILP32__)
# define __BITS_PER_LONG 64
#else
# define __BITS_PER_LONG 32
#endif

#include <asm-generic/bitsperlong.h>

#endif /* __ASM_X86_BITSPERLONG_H */
tools/arch/x86/include/uapi/asm/kvm.h (new file, 360 lines)
@@ -0,0 +1,360 @@
#ifndef _ASM_X86_KVM_H
#define _ASM_X86_KVM_H

/*
 * KVM x86 specific structures and definitions
 *
 */

#include <linux/types.h>
#include <linux/ioctl.h>

#define DE_VECTOR 0
#define DB_VECTOR 1
#define BP_VECTOR 3
#define OF_VECTOR 4
#define BR_VECTOR 5
#define UD_VECTOR 6
#define NM_VECTOR 7
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
#define MF_VECTOR 16
#define AC_VECTOR 17
#define MC_VECTOR 18
#define XM_VECTOR 19
#define VE_VECTOR 20

/* Select x86 specific features in <linux/kvm.h> */
#define __KVM_HAVE_PIT
#define __KVM_HAVE_IOAPIC
#define __KVM_HAVE_IRQ_LINE
#define __KVM_HAVE_MSI
#define __KVM_HAVE_USER_NMI
#define __KVM_HAVE_GUEST_DEBUG
#define __KVM_HAVE_MSIX
#define __KVM_HAVE_MCE
#define __KVM_HAVE_PIT_STATE2
#define __KVM_HAVE_XEN_HVM
#define __KVM_HAVE_VCPU_EVENTS
#define __KVM_HAVE_DEBUGREGS
#define __KVM_HAVE_XSAVE
#define __KVM_HAVE_XCRS
#define __KVM_HAVE_READONLY_MEM

/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256

struct kvm_memory_alias {
	__u32 slot; /* this has a different namespace than memory slots */
	__u32 flags;
	__u64 guest_phys_addr;
	__u64 memory_size;
	__u64 target_phys_addr;
};

/* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */
struct kvm_pic_state {
	__u8 last_irr; /* edge detection */
	__u8 irr; /* interrupt request register */
	__u8 imr; /* interrupt mask register */
	__u8 isr; /* interrupt service register */
	__u8 priority_add; /* highest irq priority */
	__u8 irq_base;
	__u8 read_reg_select;
	__u8 poll;
	__u8 special_mask;
	__u8 init_state;
	__u8 auto_eoi;
	__u8 rotate_on_auto_eoi;
	__u8 special_fully_nested_mode;
	__u8 init4; /* true if 4 byte init */
	__u8 elcr; /* PIIX edge/trigger selection */
	__u8 elcr_mask;
};

#define KVM_IOAPIC_NUM_PINS 24
struct kvm_ioapic_state {
	__u64 base_address;
	__u32 ioregsel;
	__u32 id;
	__u32 irr;
	__u32 pad;
	union {
		__u64 bits;
		struct {
			__u8 vector;
			__u8 delivery_mode:3;
			__u8 dest_mode:1;
			__u8 delivery_status:1;
			__u8 polarity:1;
			__u8 remote_irr:1;
			__u8 trig_mode:1;
			__u8 mask:1;
			__u8 reserve:7;
			__u8 reserved[4];
			__u8 dest_id;
		} fields;
	} redirtbl[KVM_IOAPIC_NUM_PINS];
};

#define KVM_IRQCHIP_PIC_MASTER 0
#define KVM_IRQCHIP_PIC_SLAVE 1
#define KVM_IRQCHIP_IOAPIC 2
#define KVM_NR_IRQCHIPS 3

#define KVM_RUN_X86_SMM (1 << 0)

/* for KVM_GET_REGS and KVM_SET_REGS */
struct kvm_regs {
	/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
	__u64 rax, rbx, rcx, rdx;
	__u64 rsi, rdi, rsp, rbp;
	__u64 r8, r9, r10, r11;
	__u64 r12, r13, r14, r15;
	__u64 rip, rflags;
};

/* for KVM_GET_LAPIC and KVM_SET_LAPIC */
#define KVM_APIC_REG_SIZE 0x400
struct kvm_lapic_state {
	char regs[KVM_APIC_REG_SIZE];
};

struct kvm_segment {
	__u64 base;
	__u32 limit;
	__u16 selector;
	__u8 type;
	__u8 present, dpl, db, s, l, g, avl;
	__u8 unusable;
	__u8 padding;
};

struct kvm_dtable {
	__u64 base;
	__u16 limit;
	__u16 padding[3];
};


/* for KVM_GET_SREGS and KVM_SET_SREGS */
struct kvm_sregs {
	/* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */
	struct kvm_segment cs, ds, es, fs, gs, ss;
	struct kvm_segment tr, ldt;
	struct kvm_dtable gdt, idt;
	__u64 cr0, cr2, cr3, cr4, cr8;
	__u64 efer;
	__u64 apic_base;
	__u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
};

/* for KVM_GET_FPU and KVM_SET_FPU */
struct kvm_fpu {
	__u8 fpr[8][16];
	__u16 fcw;
	__u16 fsw;
	__u8 ftwx; /* in fxsave format */
	__u8 pad1;
	__u16 last_opcode;
	__u64 last_ip;
	__u64 last_dp;
	__u8 xmm[16][16];
	__u32 mxcsr;
	__u32 pad2;
};

struct kvm_msr_entry {
	__u32 index;
	__u32 reserved;
	__u64 data;
};

/* for KVM_GET_MSRS and KVM_SET_MSRS */
struct kvm_msrs {
	__u32 nmsrs; /* number of msrs in entries */
	__u32 pad;

	struct kvm_msr_entry entries[0];
};

/* for KVM_GET_MSR_INDEX_LIST */
struct kvm_msr_list {
	__u32 nmsrs; /* number of msrs in entries */
	__u32 indices[0];
};


struct kvm_cpuid_entry {
	__u32 function;
	__u32 eax;
	__u32 ebx;
	__u32 ecx;
	__u32 edx;
	__u32 padding;
};

/* for KVM_SET_CPUID */
struct kvm_cpuid {
	__u32 nent;
	__u32 padding;
	struct kvm_cpuid_entry entries[0];
};

struct kvm_cpuid_entry2 {
	__u32 function;
	__u32 index;
	__u32 flags;
	__u32 eax;
	__u32 ebx;
	__u32 ecx;
	__u32 edx;
	__u32 padding[3];
};

#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX (1 << 0)
#define KVM_CPUID_FLAG_STATEFUL_FUNC (1 << 1)
#define KVM_CPUID_FLAG_STATE_READ_NEXT (1 << 2)

/* for KVM_SET_CPUID2 */
struct kvm_cpuid2 {
	__u32 nent;
	__u32 padding;
	struct kvm_cpuid_entry2 entries[0];
};

/* for KVM_GET_PIT and KVM_SET_PIT */
struct kvm_pit_channel_state {
	__u32 count; /* can be 65536 */
	__u16 latched_count;
	__u8 count_latched;
	__u8 status_latched;
	__u8 status;
	__u8 read_state;
	__u8 write_state;
	__u8 write_latch;
	__u8 rw_mode;
	__u8 mode;
	__u8 bcd;
	__u8 gate;
	__s64 count_load_time;
};

struct kvm_debug_exit_arch {
	__u32 exception;
	__u32 pad;
	__u64 pc;
	__u64 dr6;
	__u64 dr7;
};

#define KVM_GUESTDBG_USE_SW_BP 0x00010000
#define KVM_GUESTDBG_USE_HW_BP 0x00020000
#define KVM_GUESTDBG_INJECT_DB 0x00040000
#define KVM_GUESTDBG_INJECT_BP 0x00080000

/* for KVM_SET_GUEST_DEBUG */
struct kvm_guest_debug_arch {
	__u64 debugreg[8];
};

struct kvm_pit_state {
	struct kvm_pit_channel_state channels[3];
};

#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001

struct kvm_pit_state2 {
	struct kvm_pit_channel_state channels[3];
	__u32 flags;
	__u32 reserved[9];
};

struct kvm_reinject_control {
	__u8 pit_reinject;
	__u8 reserved[31];
};

/* When set in flags, include corresponding fields on KVM_SET_VCPU_EVENTS */
#define KVM_VCPUEVENT_VALID_NMI_PENDING 0x00000001
#define KVM_VCPUEVENT_VALID_SIPI_VECTOR 0x00000002
#define KVM_VCPUEVENT_VALID_SHADOW 0x00000004
#define KVM_VCPUEVENT_VALID_SMM 0x00000008

/* Interrupt shadow states */
#define KVM_X86_SHADOW_INT_MOV_SS 0x01
#define KVM_X86_SHADOW_INT_STI 0x02

/* for KVM_GET/SET_VCPU_EVENTS */
struct kvm_vcpu_events {
	struct {
		__u8 injected;
		__u8 nr;
		__u8 has_error_code;
		__u8 pad;
		__u32 error_code;
	} exception;
	struct {
		__u8 injected;
		__u8 nr;
		__u8 soft;
		__u8 shadow;
	} interrupt;
	struct {
		__u8 injected;
		__u8 pending;
		__u8 masked;
		__u8 pad;
	} nmi;
	__u32 sipi_vector;
	__u32 flags;
	struct {
		__u8 smm;
		__u8 pending;
		__u8 smm_inside_nmi;
		__u8 latched_init;
	} smi;
	__u32 reserved[9];
};

/* for KVM_GET/SET_DEBUGREGS */
struct kvm_debugregs {
	__u64 db[4];
	__u64 dr6;
	__u64 dr7;
	__u64 flags;
	__u64 reserved[9];
};

/* for KVM_CAP_XSAVE */
struct kvm_xsave {
	__u32 region[1024];
};

#define KVM_MAX_XCRS 16

struct kvm_xcr {
	__u32 xcr;
	__u32 reserved;
	__u64 value;
};

struct kvm_xcrs {
	__u32 nr_xcrs;
	__u32 flags;
	struct kvm_xcr xcrs[KVM_MAX_XCRS];
	__u64 padding[16];
};

/* definition of registers in kvm_run */
struct kvm_sync_regs {
};

#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)

#endif /* _ASM_X86_KVM_H */
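The structures above are the ABI for KVM's per-vCPU state ioctls. For example, struct kvm_regs is filled in by the standard KVM_GET_REGS ioctl on an open vCPU file descriptor (error handling trimmed for brevity):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* read the guest instruction pointer of an already-created vCPU */
static int dump_guest_rip(int vcpu_fd, __u64 *rip)
{
	struct kvm_regs regs;

	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) < 0)
		return -1;

	*rip = regs.rip;
	return 0;
}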
tools/arch/x86/include/uapi/asm/kvm_perf.h (new file, 16 lines)
@@ -0,0 +1,16 @@
#ifndef _ASM_X86_KVM_PERF_H
#define _ASM_X86_KVM_PERF_H

#include <asm/svm.h>
#include <asm/vmx.h>
#include <asm/kvm.h>

#define DECODE_STR_LEN 20

#define VCPU_ID "vcpu_id"

#define KVM_ENTRY_TRACE "kvm:kvm_entry"
#define KVM_EXIT_TRACE "kvm:kvm_exit"
#define KVM_EXIT_REASON "exit_reason"

#endif /* _ASM_X86_KVM_PERF_H */
tools/arch/x86/include/uapi/asm/perf_regs.h (new file, 33 lines)
@@ -0,0 +1,33 @@
#ifndef _ASM_X86_PERF_REGS_H
#define _ASM_X86_PERF_REGS_H

enum perf_event_x86_regs {
	PERF_REG_X86_AX,
	PERF_REG_X86_BX,
	PERF_REG_X86_CX,
	PERF_REG_X86_DX,
	PERF_REG_X86_SI,
	PERF_REG_X86_DI,
	PERF_REG_X86_BP,
	PERF_REG_X86_SP,
	PERF_REG_X86_IP,
	PERF_REG_X86_FLAGS,
	PERF_REG_X86_CS,
	PERF_REG_X86_SS,
	PERF_REG_X86_DS,
	PERF_REG_X86_ES,
	PERF_REG_X86_FS,
	PERF_REG_X86_GS,
	PERF_REG_X86_R8,
	PERF_REG_X86_R9,
	PERF_REG_X86_R10,
	PERF_REG_X86_R11,
	PERF_REG_X86_R12,
	PERF_REG_X86_R13,
	PERF_REG_X86_R14,
	PERF_REG_X86_R15,

	PERF_REG_X86_32_MAX = PERF_REG_X86_GS + 1,
	PERF_REG_X86_64_MAX = PERF_REG_X86_R15 + 1,
};
#endif /* _ASM_X86_PERF_REGS_H */
tools/arch/x86/include/uapi/asm/svm.h (new file, 178 lines)
@@ -0,0 +1,178 @@
#ifndef _UAPI__SVM_H
#define _UAPI__SVM_H

#define SVM_EXIT_READ_CR0 0x000
#define SVM_EXIT_READ_CR2 0x002
#define SVM_EXIT_READ_CR3 0x003
#define SVM_EXIT_READ_CR4 0x004
#define SVM_EXIT_READ_CR8 0x008
#define SVM_EXIT_WRITE_CR0 0x010
#define SVM_EXIT_WRITE_CR2 0x012
#define SVM_EXIT_WRITE_CR3 0x013
#define SVM_EXIT_WRITE_CR4 0x014
#define SVM_EXIT_WRITE_CR8 0x018
#define SVM_EXIT_READ_DR0 0x020
#define SVM_EXIT_READ_DR1 0x021
#define SVM_EXIT_READ_DR2 0x022
#define SVM_EXIT_READ_DR3 0x023
#define SVM_EXIT_READ_DR4 0x024
#define SVM_EXIT_READ_DR5 0x025
#define SVM_EXIT_READ_DR6 0x026
#define SVM_EXIT_READ_DR7 0x027
#define SVM_EXIT_WRITE_DR0 0x030
#define SVM_EXIT_WRITE_DR1 0x031
#define SVM_EXIT_WRITE_DR2 0x032
#define SVM_EXIT_WRITE_DR3 0x033
#define SVM_EXIT_WRITE_DR4 0x034
#define SVM_EXIT_WRITE_DR5 0x035
#define SVM_EXIT_WRITE_DR6 0x036
#define SVM_EXIT_WRITE_DR7 0x037
#define SVM_EXIT_EXCP_BASE 0x040
#define SVM_EXIT_INTR 0x060
#define SVM_EXIT_NMI 0x061
#define SVM_EXIT_SMI 0x062
#define SVM_EXIT_INIT 0x063
#define SVM_EXIT_VINTR 0x064
#define SVM_EXIT_CR0_SEL_WRITE 0x065
#define SVM_EXIT_IDTR_READ 0x066
#define SVM_EXIT_GDTR_READ 0x067
#define SVM_EXIT_LDTR_READ 0x068
#define SVM_EXIT_TR_READ 0x069
#define SVM_EXIT_IDTR_WRITE 0x06a
#define SVM_EXIT_GDTR_WRITE 0x06b
#define SVM_EXIT_LDTR_WRITE 0x06c
#define SVM_EXIT_TR_WRITE 0x06d
#define SVM_EXIT_RDTSC 0x06e
#define SVM_EXIT_RDPMC 0x06f
#define SVM_EXIT_PUSHF 0x070
#define SVM_EXIT_POPF 0x071
#define SVM_EXIT_CPUID 0x072
#define SVM_EXIT_RSM 0x073
#define SVM_EXIT_IRET 0x074
#define SVM_EXIT_SWINT 0x075
#define SVM_EXIT_INVD 0x076
#define SVM_EXIT_PAUSE 0x077
#define SVM_EXIT_HLT 0x078
#define SVM_EXIT_INVLPG 0x079
#define SVM_EXIT_INVLPGA 0x07a
#define SVM_EXIT_IOIO 0x07b
#define SVM_EXIT_MSR 0x07c
#define SVM_EXIT_TASK_SWITCH 0x07d
#define SVM_EXIT_FERR_FREEZE 0x07e
#define SVM_EXIT_SHUTDOWN 0x07f
#define SVM_EXIT_VMRUN 0x080
#define SVM_EXIT_VMMCALL 0x081
#define SVM_EXIT_VMLOAD 0x082
#define SVM_EXIT_VMSAVE 0x083
#define SVM_EXIT_STGI 0x084
#define SVM_EXIT_CLGI 0x085
#define SVM_EXIT_SKINIT 0x086
#define SVM_EXIT_RDTSCP 0x087
#define SVM_EXIT_ICEBP 0x088
#define SVM_EXIT_WBINVD 0x089
#define SVM_EXIT_MONITOR 0x08a
#define SVM_EXIT_MWAIT 0x08b
#define SVM_EXIT_MWAIT_COND 0x08c
#define SVM_EXIT_XSETBV 0x08d
#define SVM_EXIT_NPF 0x400
#define SVM_EXIT_AVIC_INCOMPLETE_IPI 0x401
#define SVM_EXIT_AVIC_UNACCELERATED_ACCESS 0x402

#define SVM_EXIT_ERR -1

#define SVM_EXIT_REASONS \
	{ SVM_EXIT_READ_CR0, "read_cr0" }, \
	{ SVM_EXIT_READ_CR2, "read_cr2" }, \
	{ SVM_EXIT_READ_CR3, "read_cr3" }, \
	{ SVM_EXIT_READ_CR4, "read_cr4" }, \
	{ SVM_EXIT_READ_CR8, "read_cr8" }, \
	{ SVM_EXIT_WRITE_CR0, "write_cr0" }, \
	{ SVM_EXIT_WRITE_CR2, "write_cr2" }, \
	{ SVM_EXIT_WRITE_CR3, "write_cr3" }, \
	{ SVM_EXIT_WRITE_CR4, "write_cr4" }, \
	{ SVM_EXIT_WRITE_CR8, "write_cr8" }, \
	{ SVM_EXIT_READ_DR0, "read_dr0" }, \
	{ SVM_EXIT_READ_DR1, "read_dr1" }, \
	{ SVM_EXIT_READ_DR2, "read_dr2" }, \
	{ SVM_EXIT_READ_DR3, "read_dr3" }, \
	{ SVM_EXIT_READ_DR4, "read_dr4" }, \
	{ SVM_EXIT_READ_DR5, "read_dr5" }, \
	{ SVM_EXIT_READ_DR6, "read_dr6" }, \
	{ SVM_EXIT_READ_DR7, "read_dr7" }, \
	{ SVM_EXIT_WRITE_DR0, "write_dr0" }, \
	{ SVM_EXIT_WRITE_DR1, "write_dr1" }, \
	{ SVM_EXIT_WRITE_DR2, "write_dr2" }, \
	{ SVM_EXIT_WRITE_DR3, "write_dr3" }, \
	{ SVM_EXIT_WRITE_DR4, "write_dr4" }, \
	{ SVM_EXIT_WRITE_DR5, "write_dr5" }, \
	{ SVM_EXIT_WRITE_DR6, "write_dr6" }, \
	{ SVM_EXIT_WRITE_DR7, "write_dr7" }, \
	{ SVM_EXIT_EXCP_BASE + DE_VECTOR, "DE excp" }, \
	{ SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" }, \
	{ SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" }, \
	{ SVM_EXIT_EXCP_BASE + OF_VECTOR, "OF excp" }, \
	{ SVM_EXIT_EXCP_BASE + BR_VECTOR, "BR excp" }, \
	{ SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" }, \
	{ SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" }, \
	{ SVM_EXIT_EXCP_BASE + DF_VECTOR, "DF excp" }, \
	{ SVM_EXIT_EXCP_BASE + TS_VECTOR, "TS excp" }, \
	{ SVM_EXIT_EXCP_BASE + NP_VECTOR, "NP excp" }, \
	{ SVM_EXIT_EXCP_BASE + SS_VECTOR, "SS excp" }, \
	{ SVM_EXIT_EXCP_BASE + GP_VECTOR, "GP excp" }, \
	{ SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \
	{ SVM_EXIT_EXCP_BASE + MF_VECTOR, "MF excp" }, \
	{ SVM_EXIT_EXCP_BASE + AC_VECTOR, "AC excp" }, \
	{ SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" }, \
	{ SVM_EXIT_EXCP_BASE + XM_VECTOR, "XF excp" }, \
	{ SVM_EXIT_INTR, "interrupt" }, \
	{ SVM_EXIT_NMI, "nmi" }, \
	{ SVM_EXIT_SMI, "smi" }, \
	{ SVM_EXIT_INIT, "init" }, \
	{ SVM_EXIT_VINTR, "vintr" }, \
	{ SVM_EXIT_CR0_SEL_WRITE, "cr0_sel_write" }, \
	{ SVM_EXIT_IDTR_READ, "read_idtr" }, \
	{ SVM_EXIT_GDTR_READ, "read_gdtr" }, \
	{ SVM_EXIT_LDTR_READ, "read_ldtr" }, \
	{ SVM_EXIT_TR_READ, "read_rt" }, \
	{ SVM_EXIT_IDTR_WRITE, "write_idtr" }, \
	{ SVM_EXIT_GDTR_WRITE, "write_gdtr" }, \
	{ SVM_EXIT_LDTR_WRITE, "write_ldtr" }, \
	{ SVM_EXIT_TR_WRITE, "write_rt" }, \
	{ SVM_EXIT_RDTSC, "rdtsc" }, \
	{ SVM_EXIT_RDPMC, "rdpmc" }, \
	{ SVM_EXIT_PUSHF, "pushf" }, \
	{ SVM_EXIT_POPF, "popf" }, \
	{ SVM_EXIT_CPUID, "cpuid" }, \
	{ SVM_EXIT_RSM, "rsm" }, \
	{ SVM_EXIT_IRET, "iret" }, \
	{ SVM_EXIT_SWINT, "swint" }, \
	{ SVM_EXIT_INVD, "invd" }, \
	{ SVM_EXIT_PAUSE, "pause" }, \
	{ SVM_EXIT_HLT, "hlt" }, \
	{ SVM_EXIT_INVLPG, "invlpg" }, \
	{ SVM_EXIT_INVLPGA, "invlpga" }, \
	{ SVM_EXIT_IOIO, "io" }, \
	{ SVM_EXIT_MSR, "msr" }, \
	{ SVM_EXIT_TASK_SWITCH, "task_switch" }, \
	{ SVM_EXIT_FERR_FREEZE, "ferr_freeze" }, \
	{ SVM_EXIT_SHUTDOWN, "shutdown" }, \
	{ SVM_EXIT_VMRUN, "vmrun" }, \
	{ SVM_EXIT_VMMCALL, "hypercall" }, \
	{ SVM_EXIT_VMLOAD, "vmload" }, \
	{ SVM_EXIT_VMSAVE, "vmsave" }, \
	{ SVM_EXIT_STGI, "stgi" }, \
	{ SVM_EXIT_CLGI, "clgi" }, \
	{ SVM_EXIT_SKINIT, "skinit" }, \
	{ SVM_EXIT_RDTSCP, "rdtscp" }, \
	{ SVM_EXIT_ICEBP, "icebp" }, \
	{ SVM_EXIT_WBINVD, "wbinvd" }, \
	{ SVM_EXIT_MONITOR, "monitor" }, \
	{ SVM_EXIT_MWAIT, "mwait" }, \
	{ SVM_EXIT_XSETBV, "xsetbv" }, \
	{ SVM_EXIT_NPF, "npf" }, \
	{ SVM_EXIT_AVIC_INCOMPLETE_IPI, "avic_incomplete_ipi" }, \
	{ SVM_EXIT_AVIC_UNACCELERATED_ACCESS, "avic_unaccelerated_access" }, \
	{ SVM_EXIT_ERR, "invalid_guest_state" }


#endif /* _UAPI__SVM_H */
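The SVM_EXIT_REASONS macro expands to a brace-enclosed list of code/name pairs, which is exactly the shape tools use to build a lookup table for pretty-printing exit codes. A small consumer sketch, assuming the header above is included (the struct and function names here are illustrative, not kernel API):

#include <stddef.h>

struct exit_reason {
	int code;
	const char *name;
};

/* the macro expands directly into the array initializer */
static const struct exit_reason svm_exit_reasons[] = {
	SVM_EXIT_REASONS
};

static const char *svm_exit_name(int code)
{
	size_t i;

	for (i = 0; i < sizeof(svm_exit_reasons) / sizeof(svm_exit_reasons[0]); i++)
		if (svm_exit_reasons[i].code == code)
			return svm_exit_reasons[i].name;

	return "UNKNOWN";
}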
tools/arch/x86/include/uapi/asm/vmx.h (new file, 136 lines)
@@ -0,0 +1,136 @@
/*
 * vmx.h: VMX Architecture related definitions
 * Copyright (c) 2004, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * A few random additions are:
 * Copyright (C) 2006 Qumranet
 * Avi Kivity <avi@qumranet.com>
 * Yaniv Kamay <yaniv@qumranet.com>
 *
 */
#ifndef _UAPIVMX_H
#define _UAPIVMX_H


#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000

#define EXIT_REASON_EXCEPTION_NMI 0
#define EXIT_REASON_EXTERNAL_INTERRUPT 1
#define EXIT_REASON_TRIPLE_FAULT 2

#define EXIT_REASON_PENDING_INTERRUPT 7
#define EXIT_REASON_NMI_WINDOW 8
#define EXIT_REASON_TASK_SWITCH 9
#define EXIT_REASON_CPUID 10
#define EXIT_REASON_HLT 12
#define EXIT_REASON_INVD 13
#define EXIT_REASON_INVLPG 14
#define EXIT_REASON_RDPMC 15
#define EXIT_REASON_RDTSC 16
#define EXIT_REASON_VMCALL 18
#define EXIT_REASON_VMCLEAR 19
#define EXIT_REASON_VMLAUNCH 20
#define EXIT_REASON_VMPTRLD 21
#define EXIT_REASON_VMPTRST 22
#define EXIT_REASON_VMREAD 23
#define EXIT_REASON_VMRESUME 24
#define EXIT_REASON_VMWRITE 25
#define EXIT_REASON_VMOFF 26
#define EXIT_REASON_VMON 27
#define EXIT_REASON_CR_ACCESS 28
#define EXIT_REASON_DR_ACCESS 29
#define EXIT_REASON_IO_INSTRUCTION 30
#define EXIT_REASON_MSR_READ 31
#define EXIT_REASON_MSR_WRITE 32
#define EXIT_REASON_INVALID_STATE 33
#define EXIT_REASON_MSR_LOAD_FAIL 34
#define EXIT_REASON_MWAIT_INSTRUCTION 36
#define EXIT_REASON_MONITOR_TRAP_FLAG 37
#define EXIT_REASON_MONITOR_INSTRUCTION 39
#define EXIT_REASON_PAUSE_INSTRUCTION 40
#define EXIT_REASON_MCE_DURING_VMENTRY 41
#define EXIT_REASON_TPR_BELOW_THRESHOLD 43
#define EXIT_REASON_APIC_ACCESS 44
#define EXIT_REASON_EOI_INDUCED 45
#define EXIT_REASON_EPT_VIOLATION 48
#define EXIT_REASON_EPT_MISCONFIG 49
#define EXIT_REASON_INVEPT 50
#define EXIT_REASON_RDTSCP 51
#define EXIT_REASON_PREEMPTION_TIMER 52
#define EXIT_REASON_INVVPID 53
#define EXIT_REASON_WBINVD 54
#define EXIT_REASON_XSETBV 55
#define EXIT_REASON_APIC_WRITE 56
#define EXIT_REASON_INVPCID 58
#define EXIT_REASON_PML_FULL 62
#define EXIT_REASON_XSAVES 63
#define EXIT_REASON_XRSTORS 64
#define EXIT_REASON_PCOMMIT 65

#define VMX_EXIT_REASONS \
	{ EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \
	{ EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \
	{ EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \
	{ EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \
	{ EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \
	{ EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \
	{ EXIT_REASON_CPUID, "CPUID" }, \
	{ EXIT_REASON_HLT, "HLT" }, \
	{ EXIT_REASON_INVLPG, "INVLPG" }, \
	{ EXIT_REASON_RDPMC, "RDPMC" }, \
	{ EXIT_REASON_RDTSC, "RDTSC" }, \
	{ EXIT_REASON_VMCALL, "VMCALL" }, \
	{ EXIT_REASON_VMCLEAR, "VMCLEAR" }, \
	{ EXIT_REASON_VMLAUNCH, "VMLAUNCH" }, \
	{ EXIT_REASON_VMPTRLD, "VMPTRLD" }, \
	{ EXIT_REASON_VMPTRST, "VMPTRST" }, \
	{ EXIT_REASON_VMREAD, "VMREAD" }, \
	{ EXIT_REASON_VMRESUME, "VMRESUME" }, \
	{ EXIT_REASON_VMWRITE, "VMWRITE" }, \
	{ EXIT_REASON_VMOFF, "VMOFF" }, \
	{ EXIT_REASON_VMON, "VMON" }, \
	{ EXIT_REASON_CR_ACCESS, "CR_ACCESS" }, \
	{ EXIT_REASON_DR_ACCESS, "DR_ACCESS" }, \
	{ EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \
	{ EXIT_REASON_MSR_READ, "MSR_READ" }, \
	{ EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \
	{ EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \
	{ EXIT_REASON_MONITOR_TRAP_FLAG, "MONITOR_TRAP_FLAG" }, \
	{ EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \
	{ EXIT_REASON_PAUSE_INSTRUCTION, "PAUSE_INSTRUCTION" }, \
	{ EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \
	{ EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \
	{ EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \
	{ EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \
	{ EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \
	{ EXIT_REASON_INVEPT, "INVEPT" }, \
	{ EXIT_REASON_PREEMPTION_TIMER, "PREEMPTION_TIMER" }, \
	{ EXIT_REASON_WBINVD, "WBINVD" }, \
	{ EXIT_REASON_APIC_WRITE, "APIC_WRITE" }, \
	{ EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \
	{ EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
	{ EXIT_REASON_MSR_LOAD_FAIL, "MSR_LOAD_FAIL" }, \
	{ EXIT_REASON_INVD, "INVD" }, \
	{ EXIT_REASON_INVVPID, "INVVPID" }, \
	{ EXIT_REASON_INVPCID, "INVPCID" }, \
	{ EXIT_REASON_XSAVES, "XSAVES" }, \
	{ EXIT_REASON_XRSTORS, "XRSTORS" }, \
	{ EXIT_REASON_PCOMMIT, "PCOMMIT" }

#define VMX_ABORT_SAVE_GUEST_MSR_FAIL 1
#define VMX_ABORT_LOAD_HOST_MSR_FAIL 4

#endif /* _UAPIVMX_H */
tools/arch/x86/lib/memcpy_64.S (new file, 297 lines)
@@ -0,0 +1,297 @@
/* Copyright 2002 Andi Kleen */

#include <linux/linkage.h>
#include <asm/errno.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>

/*
 * We build a jump to memcpy_orig by default which gets NOPped out on
 * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
 * have the enhanced REP MOVSB/STOSB feature (ERMS), change those NOPs
 * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
 */

.weak memcpy

/*
 * memcpy - Copy a memory block.
 *
 * Input:
 *  rdi destination
 *  rsi source
 *  rdx count
 *
 * Output:
 * rax original destination
 */
ENTRY(__memcpy)
ENTRY(memcpy)
	ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
		      "jmp memcpy_erms", X86_FEATURE_ERMS

	movq %rdi, %rax
	movq %rdx, %rcx
	shrq $3, %rcx
	andl $7, %edx
	rep movsq
	movl %edx, %ecx
	rep movsb
	ret
ENDPROC(memcpy)
ENDPROC(__memcpy)

/*
 * memcpy_erms() - enhanced fast string memcpy. This is faster and
 * simpler than memcpy. Use memcpy_erms when possible.
 */
ENTRY(memcpy_erms)
	movq %rdi, %rax
	movq %rdx, %rcx
	rep movsb
	ret
ENDPROC(memcpy_erms)

ENTRY(memcpy_orig)
	movq %rdi, %rax

	cmpq $0x20, %rdx
	jb .Lhandle_tail

	/*
	 * We check whether memory false dependence could occur,
	 * then jump to corresponding copy mode.
	 */
	cmp %dil, %sil
	jl .Lcopy_backward
	subq $0x20, %rdx
.Lcopy_forward_loop:
	subq $0x20, %rdx

	/*
	 * Move in blocks of 4x8 bytes:
	 */
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi), %r9
	movq 2*8(%rsi), %r10
	movq 3*8(%rsi), %r11
	leaq 4*8(%rsi), %rsi

	movq %r8, 0*8(%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, 2*8(%rdi)
	movq %r11, 3*8(%rdi)
	leaq 4*8(%rdi), %rdi
	jae .Lcopy_forward_loop
	addl $0x20, %edx
	jmp .Lhandle_tail

.Lcopy_backward:
	/*
	 * Calculate copy position to tail.
	 */
	addq %rdx, %rsi
	addq %rdx, %rdi
	subq $0x20, %rdx
	/*
	 * At most 3 ALU operations in one cycle,
	 * so append NOPS in the same 16 bytes trunk.
	 */
	.p2align 4
.Lcopy_backward_loop:
	subq $0x20, %rdx
	movq -1*8(%rsi), %r8
	movq -2*8(%rsi), %r9
	movq -3*8(%rsi), %r10
	movq -4*8(%rsi), %r11
	leaq -4*8(%rsi), %rsi
	movq %r8, -1*8(%rdi)
	movq %r9, -2*8(%rdi)
	movq %r10, -3*8(%rdi)
	movq %r11, -4*8(%rdi)
	leaq -4*8(%rdi), %rdi
	jae .Lcopy_backward_loop

	/*
	 * Calculate copy position to head.
	 */
	addl $0x20, %edx
	subq %rdx, %rsi
	subq %rdx, %rdi
.Lhandle_tail:
	cmpl $16, %edx
	jb .Lless_16bytes

	/*
	 * Move data from 16 bytes to 31 bytes.
	 */
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi), %r9
	movq -2*8(%rsi, %rdx), %r10
	movq -1*8(%rsi, %rdx), %r11
	movq %r8, 0*8(%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, -2*8(%rdi, %rdx)
	movq %r11, -1*8(%rdi, %rdx)
	retq
	.p2align 4
.Lless_16bytes:
	cmpl $8, %edx
	jb .Lless_8bytes
	/*
	 * Move data from 8 bytes to 15 bytes.
	 */
	movq 0*8(%rsi), %r8
	movq -1*8(%rsi, %rdx), %r9
	movq %r8, 0*8(%rdi)
	movq %r9, -1*8(%rdi, %rdx)
	retq
	.p2align 4
.Lless_8bytes:
	cmpl $4, %edx
	jb .Lless_3bytes

	/*
	 * Move data from 4 bytes to 7 bytes.
	 */
	movl (%rsi), %ecx
	movl -4(%rsi, %rdx), %r8d
	movl %ecx, (%rdi)
	movl %r8d, -4(%rdi, %rdx)
	retq
	.p2align 4
.Lless_3bytes:
	subl $1, %edx
	jb .Lend
	/*
	 * Move data from 1 bytes to 3 bytes.
	 */
	movzbl (%rsi), %ecx
	jz .Lstore_1byte
	movzbq 1(%rsi), %r8
	movzbq (%rsi, %rdx), %r9
	movb %r8b, 1(%rdi)
	movb %r9b, (%rdi, %rdx)
.Lstore_1byte:
	movb %cl, (%rdi)

.Lend:
	retq
ENDPROC(memcpy_orig)

#ifndef CONFIG_UML
/*
 * memcpy_mcsafe - memory copy with machine check exception handling
 * Note that we only catch machine checks when reading the source addresses.
 * Writes to target are posted and don't generate machine checks.
 */
ENTRY(memcpy_mcsafe)
	cmpl $8, %edx
	/* Less than 8 bytes? Go to byte copy loop */
	jb .L_no_whole_words

	/* Check for bad alignment of source */
	testl $7, %esi
	/* Already aligned */
	jz .L_8byte_aligned

	/* Copy one byte at a time until source is 8-byte aligned */
	movl %esi, %ecx
	andl $7, %ecx
	subl $8, %ecx
	negl %ecx
	subl %ecx, %edx
.L_copy_leading_bytes:
	movb (%rsi), %al
	movb %al, (%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz .L_copy_leading_bytes

.L_8byte_aligned:
	/* Figure out how many whole cache lines (64-bytes) to copy */
	movl %edx, %ecx
	andl $63, %edx
	shrl $6, %ecx
	jz .L_no_whole_cache_lines

	/* Loop copying whole cache lines */
.L_cache_w0: movq (%rsi), %r8
.L_cache_w1: movq 1*8(%rsi), %r9
.L_cache_w2: movq 2*8(%rsi), %r10
.L_cache_w3: movq 3*8(%rsi), %r11
	movq %r8, (%rdi)
	movq %r9, 1*8(%rdi)
	movq %r10, 2*8(%rdi)
	movq %r11, 3*8(%rdi)
.L_cache_w4: movq 4*8(%rsi), %r8
.L_cache_w5: movq 5*8(%rsi), %r9
.L_cache_w6: movq 6*8(%rsi), %r10
.L_cache_w7: movq 7*8(%rsi), %r11
	movq %r8, 4*8(%rdi)
	movq %r9, 5*8(%rdi)
	movq %r10, 6*8(%rdi)
	movq %r11, 7*8(%rdi)
	leaq 64(%rsi), %rsi
	leaq 64(%rdi), %rdi
	decl %ecx
	jnz .L_cache_w0

	/* Are there any trailing 8-byte words? */
.L_no_whole_cache_lines:
	movl %edx, %ecx
	andl $7, %edx
	shrl $3, %ecx
	jz .L_no_whole_words

	/* Copy trailing words */
.L_copy_trailing_words:
	movq (%rsi), %r8
	mov %r8, (%rdi)
	leaq 8(%rsi), %rsi
	leaq 8(%rdi), %rdi
	decl %ecx
	jnz .L_copy_trailing_words

	/* Any trailing bytes? */
.L_no_whole_words:
	andl %edx, %edx
	jz .L_done_memcpy_trap

	/* Copy trailing bytes */
	movl %edx, %ecx
.L_copy_trailing_bytes:
	movb (%rsi), %al
	movb %al, (%rdi)
	incq %rsi
	incq %rdi
	decl %ecx
	jnz .L_copy_trailing_bytes

	/* Copy successful. Return zero */
.L_done_memcpy_trap:
	xorq %rax, %rax
	ret
ENDPROC(memcpy_mcsafe)

	.section .fixup, "ax"
	/* Return -EFAULT for any failure */
.L_memcpy_mcsafe_fail:
	mov $-EFAULT, %rax
	ret

	.previous

	_ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w6, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_cache_w7, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_copy_trailing_words, .L_memcpy_mcsafe_fail)
	_ASM_EXTABLE_FAULT(.L_copy_trailing_bytes, .L_memcpy_mcsafe_fail)
#endif
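As the fixup section shows, memcpy_mcsafe() returns 0 on success and -EFAULT if a machine check fired while reading the source, so callers can turn a poisoned-memory read into an error return instead of a crash. A caller-side sketch in C (the prototype below is an assumption for illustration):

#include <errno.h>

extern int memcpy_mcsafe(void *dst, const void *src, unsigned long len);

static int read_from_pmem(void *dst, const void *pmem_src, unsigned long len)
{
	if (memcpy_mcsafe(dst, pmem_src, len))
		return -EIO;	/* poisoned media: report an I/O error, don't crash */

	return 0;
}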
tools/arch/x86/lib/memset_64.S (new file, 138 lines)
@@ -0,0 +1,138 @@
/* Copyright 2002 Andi Kleen, SuSE Labs */

#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>

.weak memset

/*
 * ISO C memset - set a memory block to a byte value. This function uses fast
 * string to get better performance than the original function. The code is
 * simpler and shorter than the original function as well.
 *
 * rdi destination
 * rsi value (char)
 * rdx count (bytes)
 *
 * rax original destination
 */
ENTRY(memset)
ENTRY(__memset)
	/*
	 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
	 * to use it when possible. If not available, use fast string instructions.
	 *
	 * Otherwise, use original memset function.
	 */
	ALTERNATIVE_2 "jmp memset_orig", "", X86_FEATURE_REP_GOOD, \
		      "jmp memset_erms", X86_FEATURE_ERMS

	movq %rdi,%r9
	movq %rdx,%rcx
	andl $7,%edx
	shrq $3,%rcx
	/* expand byte value */
	movzbl %sil,%esi
	movabs $0x0101010101010101,%rax
	imulq %rsi,%rax
	rep stosq
	movl %edx,%ecx
	rep stosb
	movq %r9,%rax
	ret
ENDPROC(memset)
ENDPROC(__memset)

/*
 * ISO C memset - set a memory block to a byte value. This function uses
 * enhanced rep stosb to override the fast string function.
 * The code is simpler and shorter than the fast string function as well.
 *
 * rdi destination
 * rsi value (char)
 * rdx count (bytes)
 *
 * rax original destination
 */
ENTRY(memset_erms)
	movq %rdi,%r9
	movb %sil,%al
	movq %rdx,%rcx
	rep stosb
	movq %r9,%rax
	ret
ENDPROC(memset_erms)

ENTRY(memset_orig)
	movq %rdi,%r10

	/* expand byte value */
	movzbl %sil,%ecx
	movabs $0x0101010101010101,%rax
	imulq %rcx,%rax

	/* align dst */
	movl %edi,%r9d
	andl $7,%r9d
	jnz .Lbad_alignment
.Lafter_bad_alignment:

	movq %rdx,%rcx
	shrq $6,%rcx
	jz .Lhandle_tail

	.p2align 4
.Lloop_64:
	decq %rcx
	movq %rax,(%rdi)
	movq %rax,8(%rdi)
	movq %rax,16(%rdi)
	movq %rax,24(%rdi)
	movq %rax,32(%rdi)
	movq %rax,40(%rdi)
	movq %rax,48(%rdi)
	movq %rax,56(%rdi)
	leaq 64(%rdi),%rdi
	jnz .Lloop_64

	/* Handle tail in loops. The loops should be faster than hard
	   to predict jump tables. */
	.p2align 4
.Lhandle_tail:
	movl %edx,%ecx
	andl $63&(~7),%ecx
	jz .Lhandle_7
	shrl $3,%ecx
	.p2align 4
.Lloop_8:
	decl %ecx
	movq %rax,(%rdi)
	leaq 8(%rdi),%rdi
	jnz .Lloop_8

.Lhandle_7:
	andl $7,%edx
	jz .Lende
	.p2align 4
.Lloop_1:
	decl %edx
	movb %al,(%rdi)
	leaq 1(%rdi),%rdi
	jnz .Lloop_1

.Lende:
	movq %r10,%rax
	ret

.Lbad_alignment:
	cmpq $7,%rdx
	jbe .Lhandle_7
	movq %rax,(%rdi) /* unaligned store */
	movq $8,%r8
	subq %r9,%r8
	addq %r8,%rdi
	subq %r8,%rdx
	jmp .Lafter_bad_alignment
.Lfinal:
ENDPROC(memset_orig)
@@ -40,6 +40,8 @@ FEATURE_TESTS_BASIC := \
 	libbfd \
 	libelf \
 	libelf-getphdrnum \
+	libelf-gelf_getnote \
+	libelf-getshdrstrndx \
 	libelf-mmap \
 	libnuma \
 	numa_num_possible_cpus \
@@ -60,7 +62,8 @@ FEATURE_TESTS_BASIC := \
 	zlib \
 	lzma \
 	get_cpuid \
-	bpf
+	bpf \
+	sdt

 # FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
 # of all feature tests
@@ -17,6 +17,8 @@ FILES= \
 	test-cplus-demangle.bin \
 	test-libelf.bin \
 	test-libelf-getphdrnum.bin \
+	test-libelf-gelf_getnote.bin \
+	test-libelf-getshdrstrndx.bin \
 	test-libelf-mmap.bin \
 	test-libnuma.bin \
 	test-numa_num_possible_cpus.bin \
@@ -43,7 +45,8 @@ FILES= \
 	test-zlib.bin \
 	test-lzma.bin \
 	test-bpf.bin \
-	test-get_cpuid.bin
+	test-get_cpuid.bin \
+	test-sdt.bin

 FILES := $(addprefix $(OUTPUT),$(FILES))

@@ -98,6 +101,12 @@ $(OUTPUT)test-libelf-mmap.bin:
 $(OUTPUT)test-libelf-getphdrnum.bin:
 	$(BUILD) -lelf

+$(OUTPUT)test-libelf-gelf_getnote.bin:
+	$(BUILD) -lelf
+
+$(OUTPUT)test-libelf-getshdrstrndx.bin:
+	$(BUILD) -lelf
+
 $(OUTPUT)test-libnuma.bin:
 	$(BUILD) -lnuma

@@ -205,6 +214,9 @@ $(OUTPUT)test-get_cpuid.bin:
 $(OUTPUT)test-bpf.bin:
 	$(BUILD)

+$(OUTPUT)test-sdt.bin:
+	$(BUILD)
+
 -include $(OUTPUT)*.d

 ###############################
@@ -49,6 +49,14 @@
 # include "test-libelf-getphdrnum.c"
 #undef main

+#define main main_test_libelf_gelf_getnote
+# include "test-libelf-gelf_getnote.c"
+#undef main
+
+#define main main_test_libelf_getshdrstrndx
+# include "test-libelf-getshdrstrndx.c"
+#undef main
+
 #define main main_test_libunwind
 # include "test-libunwind.c"
 #undef main
@@ -137,6 +145,10 @@
 # include "test-libcrypto.c"
 #undef main

+#define main main_test_sdt
+# include "test-sdt.c"
+#undef main
+
 int main(int argc, char *argv[])
 {
 	main_test_libpython();
@@ -149,6 +161,8 @@ int main(int argc, char *argv[])
 	main_test_dwarf();
 	main_test_dwarf_getlocations();
 	main_test_libelf_getphdrnum();
+	main_test_libelf_gelf_getnote();
+	main_test_libelf_getshdrstrndx();
 	main_test_libunwind();
 	main_test_libaudit();
 	main_test_libslang();
@@ -168,6 +182,7 @@ int main(int argc, char *argv[])
 	main_test_get_cpuid();
 	main_test_bpf();
 	main_test_libcrypto();
+	main_test_sdt();

 	return 0;
 }
tools/build/feature/test-libelf-gelf_getnote.c (new file, 7 lines)
@@ -0,0 +1,7 @@
#include <stdlib.h>
#include <gelf.h>

int main(void)
{
	return gelf_getnote(NULL, 0, NULL, NULL, NULL);
}

tools/build/feature/test-libelf-getshdrstrndx.c (new file, 8 lines)
@@ -0,0 +1,8 @@
#include <libelf.h>

int main(void)
{
	size_t dst;

	return elf_getshdrstrndx(0, &dst);
}

tools/build/feature/test-sdt.c (new file, 7 lines)
@@ -0,0 +1,7 @@
#include <sys/sdt.h>

int main(void)
{
	DTRACE_PROBE(provider, name);
	return 0;
}
@@ -2,6 +2,7 @@
 #define _TOOLS_LINUX_ASM_GENERIC_BITOPS___FFS_H_

 #include <asm/types.h>
+#include <asm/bitsperlong.h>

 /**
  * __ffs - find first bit in word.
@@ -1 +1,43 @@
-#include "../../../../include/asm-generic/bitops/__fls.h"
+#ifndef _ASM_GENERIC_BITOPS___FLS_H_
+#define _ASM_GENERIC_BITOPS___FLS_H_
+
+#include <asm/types.h>
+
+/**
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __fls(unsigned long word)
+{
+	int num = BITS_PER_LONG - 1;
+
+#if BITS_PER_LONG == 64
+	if (!(word & (~0ul << 32))) {
+		num -= 32;
+		word <<= 32;
+	}
+#endif
+	if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
+		num -= 16;
+		word <<= 16;
+	}
+	if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
+		num -= 8;
+		word <<= 8;
+	}
+	if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
+		num -= 4;
+		word <<= 4;
+	}
+	if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
+		num -= 2;
+		word <<= 2;
+	}
+	if (!(word & (~0ul << (BITS_PER_LONG-1))))
+		num -= 1;
+	return num;
+}
+
+#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */
@@ -1 +1,25 @@
-#include "../../../../include/asm-generic/bitops/arch_hweight.h"
+#ifndef _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_
+
+#include <asm/types.h>
+
+static inline unsigned int __arch_hweight32(unsigned int w)
+{
+	return __sw_hweight32(w);
+}
+
+static inline unsigned int __arch_hweight16(unsigned int w)
+{
+	return __sw_hweight16(w);
+}
+
+static inline unsigned int __arch_hweight8(unsigned int w)
+{
+	return __sw_hweight8(w);
+}
+
+static inline unsigned long __arch_hweight64(__u64 w)
+{
+	return __sw_hweight64(w);
+}
+#endif /* _ASM_GENERIC_BITOPS_ARCH_HWEIGHT_H_ */
@@ -2,6 +2,7 @@
 #define _TOOLS_LINUX_ASM_GENERIC_BITOPS_ATOMIC_H_

 #include <asm/types.h>
+#include <asm/bitsperlong.h>

 static inline void set_bit(int nr, unsigned long *addr)
 {
@@ -1 +1,43 @@
-#include "../../../../include/asm-generic/bitops/const_hweight.h"
+#ifndef _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_
+
+/*
+ * Compile time versions of __arch_hweightN()
+ */
+#define __const_hweight8(w)		\
+	((unsigned int)			\
+	 ((!!((w) & (1ULL << 0))) +	\
+	  (!!((w) & (1ULL << 1))) +	\
+	  (!!((w) & (1ULL << 2))) +	\
+	  (!!((w) & (1ULL << 3))) +	\
+	  (!!((w) & (1ULL << 4))) +	\
+	  (!!((w) & (1ULL << 5))) +	\
+	  (!!((w) & (1ULL << 6))) +	\
+	  (!!((w) & (1ULL << 7)))))
+
+#define __const_hweight16(w) (__const_hweight8(w)  + __const_hweight8((w)  >> 8 ))
+#define __const_hweight32(w) (__const_hweight16(w) + __const_hweight16((w) >> 16))
+#define __const_hweight64(w) (__const_hweight32(w) + __const_hweight32((w) >> 32))
+
+/*
+ * Generic interface.
+ */
+#define hweight8(w)  (__builtin_constant_p(w) ? __const_hweight8(w)  : __arch_hweight8(w))
+#define hweight16(w) (__builtin_constant_p(w) ? __const_hweight16(w) : __arch_hweight16(w))
+#define hweight32(w) (__builtin_constant_p(w) ? __const_hweight32(w) : __arch_hweight32(w))
+#define hweight64(w) (__builtin_constant_p(w) ? __const_hweight64(w) : __arch_hweight64(w))
+
+/*
+ * Interface for known constant arguments
+ */
+#define HWEIGHT8(w)  (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight8(w))
+#define HWEIGHT16(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight16(w))
+#define HWEIGHT32(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight32(w))
+#define HWEIGHT64(w) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(w)) + __const_hweight64(w))
+
+/*
+ * Type invariant interface to the compile time constant hweight functions.
+ */
+#define HWEIGHT(w)   HWEIGHT64((u64)w)
+
+#endif /* _ASM_GENERIC_BITOPS_CONST_HWEIGHT_H_ */
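Usage sketch for the hweight interface defined above (assumes the header and its kernel-environment helpers such as BUILD_BUG_ON_ZERO() are in scope): a compile-time-constant argument folds through __const_hweightN(), while a variable argument dispatches to the __arch_hweightN() runtime helpers.

/* with a variable argument this compiles to a call to __arch_hweight32() */
unsigned int bits_set(unsigned int mask)
{
	return hweight32(mask);
}

/* with a constant argument the whole expression folds at compile time */
enum { NIBBLE_BITS = HWEIGHT8(0x0f) };	/* == 4; fails to build if non-constant */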
@@ -1 +1,41 @@
#include "../../../../include/asm-generic/bitops/fls.h"
#ifndef _ASM_GENERIC_BITOPS_FLS_H_
#define _ASM_GENERIC_BITOPS_FLS_H_

/**
 * fls - find last (most-significant) bit set
 * @x: the word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */

static __always_inline int fls(int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xff000000u)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xf0000000u)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xc0000000u)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000u)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
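The function above is a branch-based binary search: each test halves the
remaining window, so five tests locate the highest set bit of a 32-bit word.
A short illustration, not part of the patch; for any x > 0, fls(x) - 1 is
floor(log2(x)):

	/* illustrative only */
	fls(0)          == 0;   /* documented special case */
	fls(1)          == 1;   /* bit 0 -> position 1 */
	fls(1000)       == 10;  /* 512 <= 1000 < 1024 */
	fls(0x80000000) == 32;  /* MSB -> position 32 */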
@@ -1 +1,36 @@
#include "../../../../include/asm-generic/bitops/fls64.h"
#ifndef _ASM_GENERIC_BITOPS_FLS64_H_
#define _ASM_GENERIC_BITOPS_FLS64_H_

#include <asm/types.h>

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0 or the position of the last
 * set bit if value is nonzero. The last (most significant) bit is
 * at position 64.
 */
#if BITS_PER_LONG == 32
static __always_inline int fls64(__u64 x)
{
	__u32 h = x >> 32;
	if (h)
		return fls(h) + 32;
	return fls(x);
}
#elif BITS_PER_LONG == 64
static __always_inline int fls64(__u64 x)
{
	if (x == 0)
		return 0;
	return __fls(x) + 1;
}
#else
#error BITS_PER_LONG not 32 or 64
#endif

#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
tools/include/asm-generic/bitsperlong.h (new file, 20 lines)
@@ -0,0 +1,20 @@
#ifndef __ASM_GENERIC_BITS_PER_LONG
#define __ASM_GENERIC_BITS_PER_LONG

#include <uapi/asm-generic/bitsperlong.h>

#ifdef __SIZEOF_LONG__
#define BITS_PER_LONG (__CHAR_BIT__ * __SIZEOF_LONG__)
#else
#define BITS_PER_LONG __WORDSIZE
#endif

#if BITS_PER_LONG != __BITS_PER_LONG
#error Inconsistent word size. Check asm/bitsperlong.h
#endif

#ifndef BITS_PER_LONG_LONG
#define BITS_PER_LONG_LONG 64
#endif

#endif /* __ASM_GENERIC_BITS_PER_LONG */
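The #if above cross-checks the compiler's idea of the word size
(__CHAR_BIT__ * __SIZEOF_LONG__) against the uapi __BITS_PER_LONG value, so a
mismatched header combination fails at compile time instead of miscomputing
bit offsets at run time. A standalone sketch of the same idea in C11; the
macro name here is a hypothetical stand-in, not something the patch defines:

	/* illustrative only */
	#include <limits.h>
	#define MY_BITS_PER_LONG (CHAR_BIT * (int)sizeof(long))
	_Static_assert(MY_BITS_PER_LONG == 32 || MY_BITS_PER_LONG == 64,
		       "unexpected word size");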
@@ -1,5 +1,5 @@
#ifndef _PERF_ASM_ALTERNATIVE_ASM_H
#define _PERF_ASM_ALTERNATIVE_ASM_H
#ifndef _TOOLS_ASM_ALTERNATIVE_ASM_H
#define _TOOLS_ASM_ALTERNATIVE_ASM_H

/* Just disable it so we can build arch/x86/lib/memcpy_64.S for perf bench: */
@@ -9,7 +9,9 @@
#define __WORDSIZE (__SIZEOF_LONG__ * 8)
#endif

#define BITS_PER_LONG __WORDSIZE
#ifndef BITS_PER_LONG
# define BITS_PER_LONG __WORDSIZE
#endif

#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
@@ -9,6 +9,17 @@
# define __always_inline	inline __attribute__((always_inline))
#endif

#ifdef __ANDROID__
/*
 * FIXME: Big hammer to get rid of tons of:
 * "warning: always_inline function might not be inlinable"
 *
 * At least on android-ndk-r12/platforms/android-24/arch-arm
 */
#undef __always_inline
#define __always_inline	inline
#endif

#define __user

#ifndef __attribute_const__
@@ -1,5 +1,104 @@
#include "../../../include/linux/hash.h"
#ifndef _LINUX_HASH_H
#define _LINUX_HASH_H
/* Fast hashing routine for ints, longs and pointers.
   (C) 2002 Nadia Yvette Chambers, IBM */

#ifndef _TOOLS_LINUX_HASH_H
#define _TOOLS_LINUX_HASH_H
#include <asm/types.h>
#include <linux/compiler.h>

/*
 * The "GOLDEN_RATIO_PRIME" is used in fs/btrfs/btrfs_inode.h and
 * fs/inode.c. It's not actually prime any more (the previous primes
 * were actively bad for hashing), but the name remains.
 */
#if BITS_PER_LONG == 32
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_32
#define hash_long(val, bits) hash_32(val, bits)
#elif BITS_PER_LONG == 64
#define hash_long(val, bits) hash_64(val, bits)
#define GOLDEN_RATIO_PRIME GOLDEN_RATIO_64
#else
#error Wordsize not 32 or 64
#endif

/*
 * This hash multiplies the input by a large odd number and takes the
 * high bits. Since multiplication propagates changes to the most
 * significant end only, it is essential that the high bits of the
 * product be used for the hash value.
 *
 * Chuck Lever verified the effectiveness of this technique:
 * http://www.citi.umich.edu/techreports/reports/citi-tr-00-1.pdf
 *
 * Although a random odd number will do, it turns out that the golden
 * ratio phi = (sqrt(5)-1)/2, or its negative, has particularly nice
 * properties. (See Knuth vol 3, section 6.4, exercise 9.)
 *
 * These are the negative, (1 - phi) = phi**2 = (3 - sqrt(5))/2,
 * which is very slightly easier to multiply by and makes no
 * difference to the hash distribution.
 */
#define GOLDEN_RATIO_32 0x61C88647
#define GOLDEN_RATIO_64 0x61C8864680B583EBull

#ifdef CONFIG_HAVE_ARCH_HASH
/* This header may use the GOLDEN_RATIO_xx constants */
#include <asm/hash.h>
#endif

/*
 * The _generic versions exist only so lib/test_hash.c can compare
 * the arch-optimized versions with the generic.
 *
 * Note that if you change these, any <asm/hash.h> that aren't updated
 * to match need to have their HAVE_ARCH_* define values updated so the
 * self-test will not false-positive.
 */
#ifndef HAVE_ARCH__HASH_32
#define __hash_32 __hash_32_generic
#endif
static inline u32 __hash_32_generic(u32 val)
{
	return val * GOLDEN_RATIO_32;
}

#ifndef HAVE_ARCH_HASH_32
#define hash_32 hash_32_generic
#endif
static inline u32 hash_32_generic(u32 val, unsigned int bits)
{
	/* High bits are more random, so use them. */
	return __hash_32(val) >> (32 - bits);
}

#ifndef HAVE_ARCH_HASH_64
#define hash_64 hash_64_generic
#endif
static __always_inline u32 hash_64_generic(u64 val, unsigned int bits)
{
#if BITS_PER_LONG == 64
	/* 64x64-bit multiply is efficient on all 64-bit processors */
	return val * GOLDEN_RATIO_64 >> (64 - bits);
#else
	/* Hash 64 bits using only 32x32-bit multiply. */
	return hash_32((u32)val ^ __hash_32(val >> 32), bits);
#endif
}

static inline u32 hash_ptr(const void *ptr, unsigned int bits)
{
	return hash_long((unsigned long)ptr, bits);
}

/* This really should be called fold32_ptr; it does no hashing to speak of. */
static inline u32 hash32_ptr(const void *ptr)
{
	unsigned long val = (unsigned long)ptr;

#if BITS_PER_LONG == 64
	val ^= (val >> 32);
#endif
	return (u32)val;
}

#endif /* _LINUX_HASH_H */
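A brief illustration, not part of the patch, of what the golden-ratio
multiply buys you: hash_32(val, bits) spreads values across 2^bits buckets by
multiplying and keeping only the high bits, which is where the multiply mixes
entropy. The helper below repeats that arithmetic standalone:

	/* illustrative only: same arithmetic as hash_32(val, 8) */
	#include <stdint.h>

	static uint32_t bucket_of(uint32_t val)
	{
		return (val * 0x61C88647u) >> (32 - 8); /* 0..255 */
	}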
@@ -2,8 +2,7 @@
#define __TOOLS_LINUX_KERNEL_H

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <assert.h>

#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))

@@ -70,29 +69,8 @@
#define cpu_to_le64(x) (x)
#define cpu_to_le32(x) (x)

static inline int
vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
	int i;
	ssize_t ssize = size;

	i = vsnprintf(buf, size, fmt, args);

	return (i >= ssize) ? (ssize - 1) : i;
}

static inline int scnprintf(char * buf, size_t size, const char * fmt, ...)
{
	va_list args;
	ssize_t ssize = size;
	int i;

	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);

	return (i >= ssize) ? (ssize - 1) : i;
}
int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
int scnprintf(char * buf, size_t size, const char * fmt, ...);

/*
 * This looks more complex than it should be. But we need to
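The hunk above replaces the inline definitions with declarations; the
semantics stay the same: unlike snprintf(), scnprintf() returns the number of
characters actually written (excluding the terminating NUL), never the
would-be length. That makes the common append loop safe, as in this
illustrative sketch (not part of the patch):

	/* illustrative only: len can never walk past sizeof(buf) - 1 */
	char buf[64];
	size_t len = 0;

	len += scnprintf(buf + len, sizeof(buf) - len, "cpu %d", 0);
	len += scnprintf(buf + len, sizeof(buf) - len, " state %s", "online");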
@@ -1 +1,90 @@
#include "../../../include/linux/poison.h"
#ifndef _LINUX_POISON_H
#define _LINUX_POISON_H

/********** include/linux/list.h **********/

/*
 * Architectures might want to move the poison pointer offset
 * into some well-recognized area such as 0xdead000000000000,
 * that is also not mappable by user-space exploits:
 */
#ifdef CONFIG_ILLEGAL_POINTER_VALUE
# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
#else
# define POISON_POINTER_DELTA 0
#endif

/*
 * These are non-NULL pointers that will result in page faults
 * under normal circumstances, used to verify that nobody uses
 * non-initialized list entries.
 */
#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)

/********** include/linux/timer.h **********/
/*
 * Magic number "tsta" to indicate a static timer initializer
 * for the object debugging code.
 */
#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA)

/********** mm/debug-pagealloc.c **********/
#ifdef CONFIG_PAGE_POISONING_ZERO
#define PAGE_POISON 0x00
#else
#define PAGE_POISON 0xaa
#endif

/********** mm/page_alloc.c ************/

#define TAIL_MAPPING ((void *) 0x400 + POISON_POINTER_DELTA)

/********** mm/slab.c **********/
/*
 * Magic nums for obj red zoning.
 * Placed in the first word before and the first word after an obj.
 */
#define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */
#define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */

#define SLUB_RED_INACTIVE 0xbb
#define SLUB_RED_ACTIVE 0xcc

/* ...and for poisoning */
#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */
#define POISON_FREE 0x6b /* for use-after-free poisoning */
#define POISON_END 0xa5 /* end-byte of poisoning */

/********** arch/$ARCH/mm/init.c **********/
#define POISON_FREE_INITMEM 0xcc

/********** arch/ia64/hp/common/sba_iommu.c **********/
/*
 * arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a
 * value of "SBAIOMMU POISON\0" for spill-over poisoning.
 */

/********** fs/jbd/journal.c **********/
#define JBD_POISON_FREE 0x5b
#define JBD2_POISON_FREE 0x5c

/********** drivers/base/dmapool.c **********/
#define POOL_POISON_FREED 0xa7 /* !inuse */
#define POOL_POISON_ALLOCATED 0xa9 /* !initted */

/********** drivers/atm/ **********/
#define ATM_POISON_FREE 0x12
#define ATM_POISON 0xdeadbeef

/********** kernel/mutexes **********/
#define MUTEX_DEBUG_INIT 0x11
#define MUTEX_DEBUG_FREE 0x22

/********** lib/flex_array.c **********/
#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */

/********** security/ **********/
#define KEY_DESTROY 0xbd

#endif
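For context, a sketch of how the two list poison values are consumed; this
mirrors the unlinking done by the kernel's list_del(), with the struct
repeated only to keep the example self-contained:

	/* illustrative only, modeled on include/linux/list.h */
	struct list_head { struct list_head *next, *prev; };

	static inline void example_list_del(struct list_head *entry)
	{
		entry->next->prev = entry->prev;
		entry->prev->next = entry->next;
		/* poison, so a later use of the dangling entry faults */
		entry->next = LIST_POISON1;
		entry->prev = LIST_POISON2;
	}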
@@ -8,8 +8,10 @@ void *memdup(const void *src, size_t len);

int strtobool(const char *s, bool *res);

#ifndef __UCLIBC__
#ifdef __GLIBC__
extern size_t strlcpy(char *dest, const char *src, size_t size);
#endif

char *str_error_r(int errnum, char *buf, size_t buflen);

#endif /* _LINUX_STRING_H_ */
tools/include/uapi/asm-generic/bitsperlong.h (new file, 15 lines)
@@ -0,0 +1,15 @@
#ifndef _UAPI__ASM_GENERIC_BITS_PER_LONG
#define _UAPI__ASM_GENERIC_BITS_PER_LONG

/*
 * There seems to be no way of detecting this automatically from user
 * space, so 64 bit architectures should override this in their
 * bitsperlong.h. In particular, an architecture that supports
 * both 32 and 64 bit user space must not rely on CONFIG_64BIT
 * to decide it, but rather check a compiler provided macro.
 */
#ifndef __BITS_PER_LONG
#define __BITS_PER_LONG 32
#endif

#endif /* _UAPI__ASM_GENERIC_BITS_PER_LONG */
tools/include/uapi/linux/bpf.h (new file, 389 lines)
@@ -0,0 +1,389 @@
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#ifndef _UAPI__LINUX_BPF_H__
#define _UAPI__LINUX_BPF_H__

#include <linux/types.h>
#include <linux/bpf_common.h>

/* Extended instruction set based on top of classic BPF */

/* instruction classes */
#define BPF_ALU64	0x07	/* alu mode in double word width */

/* ld/ldx fields */
#define BPF_DW		0x18	/* double word */
#define BPF_XADD	0xc0	/* exclusive add */

/* alu/jmp fields */
#define BPF_MOV		0xb0	/* mov reg to reg */
#define BPF_ARSH	0xc0	/* sign extending arithmetic shift right */

/* change endianness of a register */
#define BPF_END		0xd0	/* flags for endianness conversion: */
#define BPF_TO_LE	0x00	/* convert to little-endian */
#define BPF_TO_BE	0x08	/* convert to big-endian */
#define BPF_FROM_LE	BPF_TO_LE
#define BPF_FROM_BE	BPF_TO_BE

#define BPF_JNE		0x50	/* jump != */
#define BPF_JSGT	0x60	/* SGT is signed '>', GT in x86 */
#define BPF_JSGE	0x70	/* SGE is signed '>=', GE in x86 */
#define BPF_CALL	0x80	/* function call */
#define BPF_EXIT	0x90	/* function return */

/* Register numbers */
enum {
	BPF_REG_0 = 0,
	BPF_REG_1,
	BPF_REG_2,
	BPF_REG_3,
	BPF_REG_4,
	BPF_REG_5,
	BPF_REG_6,
	BPF_REG_7,
	BPF_REG_8,
	BPF_REG_9,
	BPF_REG_10,
	__MAX_BPF_REG,
};

/* BPF has 10 general purpose 64-bit registers and stack frame. */
#define MAX_BPF_REG	__MAX_BPF_REG

struct bpf_insn {
	__u8	code;		/* opcode */
	__u8	dst_reg:4;	/* dest register */
	__u8	src_reg:4;	/* source register */
	__s16	off;		/* signed offset */
	__s32	imm;		/* signed immediate constant */
};

/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
	BPF_MAP_CREATE,
	BPF_MAP_LOOKUP_ELEM,
	BPF_MAP_UPDATE_ELEM,
	BPF_MAP_DELETE_ELEM,
	BPF_MAP_GET_NEXT_KEY,
	BPF_PROG_LOAD,
	BPF_OBJ_PIN,
	BPF_OBJ_GET,
};

enum bpf_map_type {
	BPF_MAP_TYPE_UNSPEC,
	BPF_MAP_TYPE_HASH,
	BPF_MAP_TYPE_ARRAY,
	BPF_MAP_TYPE_PROG_ARRAY,
	BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	BPF_MAP_TYPE_PERCPU_HASH,
	BPF_MAP_TYPE_PERCPU_ARRAY,
	BPF_MAP_TYPE_STACK_TRACE,
};

enum bpf_prog_type {
	BPF_PROG_TYPE_UNSPEC,
	BPF_PROG_TYPE_SOCKET_FILTER,
	BPF_PROG_TYPE_KPROBE,
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_TRACEPOINT,
};

#define BPF_PSEUDO_MAP_FD	1

/* flags for BPF_MAP_UPDATE_ELEM command */
#define BPF_ANY		0 /* create new element or update existing */
#define BPF_NOEXIST	1 /* create new element if it didn't exist */
#define BPF_EXIST	2 /* update existing element */

#define BPF_F_NO_PREALLOC	(1U << 0)

union bpf_attr {
	struct { /* anonymous struct used by BPF_MAP_CREATE command */
		__u32	map_type;	/* one of enum bpf_map_type */
		__u32	key_size;	/* size of key in bytes */
		__u32	value_size;	/* size of value in bytes */
		__u32	max_entries;	/* max number of entries in a map */
		__u32	map_flags;	/* prealloc or not */
	};

	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
		__u32		map_fd;
		__aligned_u64	key;
		union {
			__aligned_u64 value;
			__aligned_u64 next_key;
		};
		__u64		flags;
	};

	struct { /* anonymous struct used by BPF_PROG_LOAD command */
		__u32		prog_type;	/* one of enum bpf_prog_type */
		__u32		insn_cnt;
		__aligned_u64	insns;
		__aligned_u64	license;
		__u32		log_level;	/* verbosity level of verifier */
		__u32		log_size;	/* size of user buffer */
		__aligned_u64	log_buf;	/* user supplied buffer */
		__u32		kern_version;	/* checked when prog_type=kprobe */
	};

	struct { /* anonymous struct used by BPF_OBJ_* commands */
		__aligned_u64	pathname;
		__u32		bpf_fd;
	};
} __attribute__((aligned(8)));

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
 * function eBPF program intends to call
 */
enum bpf_func_id {
	BPF_FUNC_unspec,
	BPF_FUNC_map_lookup_elem, /* void *map_lookup_elem(&map, &key) */
	BPF_FUNC_map_update_elem, /* int map_update_elem(&map, &key, &value, flags) */
	BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */
	BPF_FUNC_probe_read, /* int bpf_probe_read(void *dst, int size, void *src) */
	BPF_FUNC_ktime_get_ns, /* u64 bpf_ktime_get_ns(void) */
	BPF_FUNC_trace_printk, /* int bpf_trace_printk(const char *fmt, int fmt_size, ...) */
	BPF_FUNC_get_prandom_u32, /* u32 prandom_u32(void) */
	BPF_FUNC_get_smp_processor_id, /* u32 raw_smp_processor_id(void) */

	/**
	 * skb_store_bytes(skb, offset, from, len, flags) - store bytes into packet
	 * @skb: pointer to skb
	 * @offset: offset within packet from skb->mac_header
	 * @from: pointer where to copy bytes from
	 * @len: number of bytes to store into packet
	 * @flags: bit 0 - if true, recompute skb->csum
	 *         other bits - reserved
	 * Return: 0 on success
	 */
	BPF_FUNC_skb_store_bytes,

	/**
	 * l3_csum_replace(skb, offset, from, to, flags) - recompute IP checksum
	 * @skb: pointer to skb
	 * @offset: offset within packet where IP checksum is located
	 * @from: old value of header field
	 * @to: new value of header field
	 * @flags: bits 0-3 - size of header field
	 *         other bits - reserved
	 * Return: 0 on success
	 */
	BPF_FUNC_l3_csum_replace,

	/**
	 * l4_csum_replace(skb, offset, from, to, flags) - recompute TCP/UDP checksum
	 * @skb: pointer to skb
	 * @offset: offset within packet where TCP/UDP checksum is located
	 * @from: old value of header field
	 * @to: new value of header field
	 * @flags: bits 0-3 - size of header field
	 *         bit 4 - is pseudo header
	 *         other bits - reserved
	 * Return: 0 on success
	 */
	BPF_FUNC_l4_csum_replace,

	/**
	 * bpf_tail_call(ctx, prog_array_map, index) - jump into another BPF program
	 * @ctx: context pointer passed to next program
	 * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY
	 * @index: index inside array that selects specific program to run
	 * Return: 0 on success
	 */
	BPF_FUNC_tail_call,

	/**
	 * bpf_clone_redirect(skb, ifindex, flags) - redirect to another netdev
	 * @skb: pointer to skb
	 * @ifindex: ifindex of the net device
	 * @flags: bit 0 - if set, redirect to ingress instead of egress
	 *         other bits - reserved
	 * Return: 0 on success
	 */
	BPF_FUNC_clone_redirect,

	/**
	 * u64 bpf_get_current_pid_tgid(void)
	 * Return: current->tgid << 32 | current->pid
	 */
	BPF_FUNC_get_current_pid_tgid,

	/**
	 * u64 bpf_get_current_uid_gid(void)
	 * Return: current_gid << 32 | current_uid
	 */
	BPF_FUNC_get_current_uid_gid,

	/**
	 * bpf_get_current_comm(char *buf, int size_of_buf)
	 * stores current->comm into buf
	 * Return: 0 on success
	 */
	BPF_FUNC_get_current_comm,

	/**
	 * bpf_get_cgroup_classid(skb) - retrieve a proc's classid
	 * @skb: pointer to skb
	 * Return: classid if != 0
	 */
	BPF_FUNC_get_cgroup_classid,
	BPF_FUNC_skb_vlan_push, /* bpf_skb_vlan_push(skb, vlan_proto, vlan_tci) */
	BPF_FUNC_skb_vlan_pop, /* bpf_skb_vlan_pop(skb) */

	/**
	 * bpf_skb_[gs]et_tunnel_key(skb, key, size, flags)
	 * retrieve or populate tunnel metadata
	 * @skb: pointer to skb
	 * @key: pointer to 'struct bpf_tunnel_key'
	 * @size: size of 'struct bpf_tunnel_key'
	 * @flags: room for future extensions
	 * Return: 0 on success
	 */
	BPF_FUNC_skb_get_tunnel_key,
	BPF_FUNC_skb_set_tunnel_key,
	BPF_FUNC_perf_event_read, /* u64 bpf_perf_event_read(&map, index) */
	/**
	 * bpf_redirect(ifindex, flags) - redirect to another netdev
	 * @ifindex: ifindex of the net device
	 * @flags: bit 0 - if set, redirect to ingress instead of egress
	 *         other bits - reserved
	 * Return: TC_ACT_REDIRECT
	 */
	BPF_FUNC_redirect,

	/**
	 * bpf_get_route_realm(skb) - retrieve a dst's tclassid
	 * @skb: pointer to skb
	 * Return: realm if != 0
	 */
	BPF_FUNC_get_route_realm,

	/**
	 * bpf_perf_event_output(ctx, map, index, data, size) - output perf raw sample
	 * @ctx: struct pt_regs*
	 * @map: pointer to perf_event_array map
	 * @index: index of event in the map
	 * @data: data on stack to be output as raw data
	 * @size: size of data
	 * Return: 0 on success
	 */
	BPF_FUNC_perf_event_output,
	BPF_FUNC_skb_load_bytes,

	/**
	 * bpf_get_stackid(ctx, map, flags) - walk user or kernel stack and return id
	 * @ctx: struct pt_regs*
	 * @map: pointer to stack_trace map
	 * @flags: bits 0-7 - number of stack frames to skip
	 *         bit 8 - collect user stack instead of kernel
	 *         bit 9 - compare stacks by hash only
	 *         bit 10 - if two different stacks hash into the same stackid
	 *                  discard old
	 *         other bits - reserved
	 * Return: >= 0 stackid on success or negative error
	 */
	BPF_FUNC_get_stackid,

	/**
	 * bpf_csum_diff(from, from_size, to, to_size, seed) - calculate csum diff
	 * @from: raw from buffer
	 * @from_size: length of from buffer
	 * @to: raw to buffer
	 * @to_size: length of to buffer
	 * @seed: optional seed
	 * Return: csum result
	 */
	BPF_FUNC_csum_diff,

	/**
	 * bpf_skb_[gs]et_tunnel_opt(skb, opt, size)
	 * retrieve or populate tunnel options metadata
	 * @skb: pointer to skb
	 * @opt: pointer to raw tunnel option data
	 * @size: size of @opt
	 * Return: 0 on success for set, option size for get
	 */
	BPF_FUNC_skb_get_tunnel_opt,
	BPF_FUNC_skb_set_tunnel_opt,
	__BPF_FUNC_MAX_ID,
};

/* All flags used by eBPF helper functions, placed here. */

/* BPF_FUNC_skb_store_bytes flags. */
#define BPF_F_RECOMPUTE_CSUM		(1ULL << 0)
#define BPF_F_INVALIDATE_HASH		(1ULL << 1)

/* BPF_FUNC_l3_csum_replace and BPF_FUNC_l4_csum_replace flags.
 * First 4 bits are for passing the header field size.
 */
#define BPF_F_HDR_FIELD_MASK		0xfULL

/* BPF_FUNC_l4_csum_replace flags. */
#define BPF_F_PSEUDO_HDR		(1ULL << 4)
#define BPF_F_MARK_MANGLED_0		(1ULL << 5)

/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
#define BPF_F_INGRESS			(1ULL << 0)

/* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */
#define BPF_F_TUNINFO_IPV6		(1ULL << 0)

/* BPF_FUNC_get_stackid flags. */
#define BPF_F_SKIP_FIELD_MASK		0xffULL
#define BPF_F_USER_STACK		(1ULL << 8)
#define BPF_F_FAST_STACK_CMP		(1ULL << 9)
#define BPF_F_REUSE_STACKID		(1ULL << 10)

/* BPF_FUNC_skb_set_tunnel_key flags. */
#define BPF_F_ZERO_CSUM_TX		(1ULL << 1)
#define BPF_F_DONT_FRAGMENT		(1ULL << 2)

/* BPF_FUNC_perf_event_output flags. */
#define BPF_F_INDEX_MASK		0xffffffffULL
#define BPF_F_CURRENT_CPU		BPF_F_INDEX_MASK

/* user accessible mirror of in-kernel sk_buff.
 * new fields can only be added to the end of this structure
 */
struct __sk_buff {
	__u32 len;
	__u32 pkt_type;
	__u32 mark;
	__u32 queue_mapping;
	__u32 protocol;
	__u32 vlan_present;
	__u32 vlan_tci;
	__u32 vlan_proto;
	__u32 priority;
	__u32 ingress_ifindex;
	__u32 ifindex;
	__u32 tc_index;
	__u32 cb[5];
	__u32 hash;
	__u32 tc_classid;
	__u32 data;
	__u32 data_end;
};

struct bpf_tunnel_key {
	__u32 tunnel_id;
	union {
		__u32 remote_ipv4;
		__u32 remote_ipv6[4];
	};
	__u8 tunnel_tos;
	__u8 tunnel_ttl;
	__u16 tunnel_ext;
	__u32 tunnel_label;
};

#endif /* _UAPI__LINUX_BPF_H__ */
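An illustrative sketch, not part of the patch, of how struct bpf_insn and the
opcode macros above compose: the smallest valid eBPF program is "r0 = 0;
exit", hand-assembled and loaded through the bpf(2) syscall (error handling
omitted; BPF_K comes from bpf_common.h below):

	/* illustrative only */
	#include <linux/bpf.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	static int load_null_prog(void)
	{
		struct bpf_insn prog[] = {
			/* r0 = 0 : BPF_ALU64 | BPF_MOV | BPF_K == 0xb7 */
			{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
			  .dst_reg = BPF_REG_0, .imm = 0 },
			/* exit : BPF_JMP | BPF_EXIT == 0x95 */
			{ .code = BPF_JMP | BPF_EXIT },
		};
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
		attr.insn_cnt = 2;
		attr.insns = (unsigned long)prog;
		attr.license = (unsigned long)"GPL";

		return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	}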
tools/include/uapi/linux/bpf_common.h (new file, 55 lines)
@@ -0,0 +1,55 @@
#ifndef _UAPI__LINUX_BPF_COMMON_H__
#define _UAPI__LINUX_BPF_COMMON_H__

/* Instruction classes */
#define BPF_CLASS(code) ((code) & 0x07)
#define BPF_LD		0x00
#define BPF_LDX		0x01
#define BPF_ST		0x02
#define BPF_STX		0x03
#define BPF_ALU		0x04
#define BPF_JMP		0x05
#define BPF_RET		0x06
#define BPF_MISC	0x07

/* ld/ldx fields */
#define BPF_SIZE(code)	((code) & 0x18)
#define BPF_W		0x00
#define BPF_H		0x08
#define BPF_B		0x10
#define BPF_MODE(code)	((code) & 0xe0)
#define BPF_IMM		0x00
#define BPF_ABS		0x20
#define BPF_IND		0x40
#define BPF_MEM		0x60
#define BPF_LEN		0x80
#define BPF_MSH		0xa0

/* alu/jmp fields */
#define BPF_OP(code)	((code) & 0xf0)
#define BPF_ADD		0x00
#define BPF_SUB		0x10
#define BPF_MUL		0x20
#define BPF_DIV		0x30
#define BPF_OR		0x40
#define BPF_AND		0x50
#define BPF_LSH		0x60
#define BPF_RSH		0x70
#define BPF_NEG		0x80
#define BPF_MOD		0x90
#define BPF_XOR		0xa0

#define BPF_JA		0x00
#define BPF_JEQ		0x10
#define BPF_JGT		0x20
#define BPF_JGE		0x30
#define BPF_JSET	0x40
#define BPF_SRC(code)	((code) & 0x08)
#define BPF_K		0x00
#define BPF_X		0x08

#ifndef BPF_MAXINSNS
#define BPF_MAXINSNS 4096
#endif

#endif /* _UAPI__LINUX_BPF_COMMON_H__ */
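These field macros slice a BPF opcode byte into its class, operation and
operand-source bits. A small illustration, not part of the patch: 0x15 is
BPF_JMP | BPF_JEQ | BPF_K, i.e. "jump if equal to the immediate":

	/* illustrative only */
	#include <assert.h>

	static void decode_example(void)
	{
		unsigned char code = 0x15;

		assert(BPF_CLASS(code) == BPF_JMP); /* 0x05 */
		assert(BPF_OP(code) == BPF_JEQ);    /* 0x10 */
		assert(BPF_SRC(code) == BPF_K);     /* immediate operand */
	}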
tools/include/uapi/linux/hw_breakpoint.h (new file, 30 lines)
@@ -0,0 +1,30 @@
#ifndef _UAPI_LINUX_HW_BREAKPOINT_H
#define _UAPI_LINUX_HW_BREAKPOINT_H

enum {
	HW_BREAKPOINT_LEN_1 = 1,
	HW_BREAKPOINT_LEN_2 = 2,
	HW_BREAKPOINT_LEN_4 = 4,
	HW_BREAKPOINT_LEN_8 = 8,
};

enum {
	HW_BREAKPOINT_EMPTY = 0,
	HW_BREAKPOINT_R = 1,
	HW_BREAKPOINT_W = 2,
	HW_BREAKPOINT_RW = HW_BREAKPOINT_R | HW_BREAKPOINT_W,
	HW_BREAKPOINT_X = 4,
	HW_BREAKPOINT_INVALID = HW_BREAKPOINT_RW | HW_BREAKPOINT_X,
};

enum bp_type_idx {
	TYPE_INST = 0,
#ifdef CONFIG_HAVE_MIXED_BREAKPOINTS_REGS
	TYPE_DATA = 0,
#else
	TYPE_DATA = 1,
#endif
	TYPE_MAX
};

#endif /* _UAPI_LINUX_HW_BREAKPOINT_H */
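These constants plug into struct perf_event_attr (defined in the next file)
when requesting a hardware breakpoint. An illustrative sketch, not part of
the patch; 'watched' is a hypothetical variable and error handling is
omitted:

	/* illustrative only: a 4-byte write watchpoint on 'watched' */
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_BREAKPOINT;
	attr.size = sizeof(attr);
	attr.bp_type = HW_BREAKPOINT_W;
	attr.bp_addr = (unsigned long)&watched;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.sample_period = 1;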
tools/include/uapi/linux/perf_event.h (new file, 983 lines)
@@ -0,0 +1,983 @@
/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _UAPI_LINUX_PERF_EVENT_H
#define _UAPI_LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE = 0,
	PERF_TYPE_SOFTWARE = 1,
	PERF_TYPE_TRACEPOINT = 2,
	PERF_TYPE_HW_CACHE = 3,
	PERF_TYPE_RAW = 4,
	PERF_TYPE_BREAKPOINT = 5,

	PERF_TYPE_MAX, /* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.event_id parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES = 0,
	PERF_COUNT_HW_INSTRUCTIONS = 1,
	PERF_COUNT_HW_CACHE_REFERENCES = 2,
	PERF_COUNT_HW_CACHE_MISSES = 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
	PERF_COUNT_HW_BRANCH_MISSES = 5,
	PERF_COUNT_HW_BUS_CYCLES = 6,
	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
	PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
	PERF_COUNT_HW_REF_CPU_CYCLES = 9,

	PERF_COUNT_HW_MAX, /* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D = 0,
	PERF_COUNT_HW_CACHE_L1I = 1,
	PERF_COUNT_HW_CACHE_LL = 2,
	PERF_COUNT_HW_CACHE_DTLB = 3,
	PERF_COUNT_HW_CACHE_ITLB = 4,
	PERF_COUNT_HW_CACHE_BPU = 5,
	PERF_COUNT_HW_CACHE_NODE = 6,

	PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
};

enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ = 0,
	PERF_COUNT_HW_CACHE_OP_WRITE = 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,

	PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS = 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
};

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK = 0,
	PERF_COUNT_SW_TASK_CLOCK = 1,
	PERF_COUNT_SW_PAGE_FAULTS = 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
	PERF_COUNT_SW_CPU_MIGRATIONS = 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
	PERF_COUNT_SW_EMULATION_FAULTS = 8,
	PERF_COUNT_SW_DUMMY = 9,
	PERF_COUNT_SW_BPF_OUTPUT = 10,

	PERF_COUNT_SW_MAX, /* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP = 1U << 0,
	PERF_SAMPLE_TID = 1U << 1,
	PERF_SAMPLE_TIME = 1U << 2,
	PERF_SAMPLE_ADDR = 1U << 3,
	PERF_SAMPLE_READ = 1U << 4,
	PERF_SAMPLE_CALLCHAIN = 1U << 5,
	PERF_SAMPLE_ID = 1U << 6,
	PERF_SAMPLE_CPU = 1U << 7,
	PERF_SAMPLE_PERIOD = 1U << 8,
	PERF_SAMPLE_STREAM_ID = 1U << 9,
	PERF_SAMPLE_RAW = 1U << 10,
	PERF_SAMPLE_BRANCH_STACK = 1U << 11,
	PERF_SAMPLE_REGS_USER = 1U << 12,
	PERF_SAMPLE_STACK_USER = 1U << 13,
	PERF_SAMPLE_WEIGHT = 1U << 14,
	PERF_SAMPLE_DATA_SRC = 1U << 15,
	PERF_SAMPLE_IDENTIFIER = 1U << 16,
	PERF_SAMPLE_TRANSACTION = 1U << 17,
	PERF_SAMPLE_REGS_INTR = 1U << 18,

	PERF_SAMPLE_MAX = 1U << 19, /* non-ABI */
};

/*
 * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type_shift {
	PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */
	PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */
	PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */

	PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */
	PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */
	PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */
	PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */
	PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */
	PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */
	PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */
	PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */

	PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */
	PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */
	PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /* direct call */

	PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, /* no flags */
	PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, /* no cycles */

	PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
};

enum perf_branch_sample_type {
	PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
	PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
	PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,

	PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
	PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
	PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
	PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
	PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
	PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
	PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
	PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,

	PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
	PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
	PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,

	PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
	PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,

	PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};

#define PERF_SAMPLE_BRANCH_PLM_ALL \
	(PERF_SAMPLE_BRANCH_USER|\
	 PERF_SAMPLE_BRANCH_KERNEL|\
	 PERF_SAMPLE_BRANCH_HV)

/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
	PERF_SAMPLE_REGS_ABI_NONE = 0,
	PERF_SAMPLE_REGS_ABI_32 = 1,
	PERF_SAMPLE_REGS_ABI_64 = 2,
};

/*
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum {
	PERF_TXN_ELISION = (1 << 0), /* From elision */
	PERF_TXN_TRANSACTION = (1 << 1), /* From transaction */
	PERF_TXN_SYNC = (1 << 2), /* Instruction is related */
	PERF_TXN_ASYNC = (1 << 3), /* Instruction not related */
	PERF_TXN_RETRY = (1 << 4), /* Retry possible */
	PERF_TXN_CONFLICT = (1 << 5), /* Conflict abort */
	PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
	PERF_TXN_CAPACITY_READ = (1 << 7), /* Capacity read abort */

	PERF_TXN_MAX = (1 << 8), /* non-ABI */

	/* bits 32..63 are reserved for the abort code */

	PERF_TXN_ABORT_MASK = (0xffffffffULL << 32),
	PERF_TXN_ABORT_SHIFT = 32,
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64 value;
 *	  { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 id; } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64 nr;
 *	  { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 value;
 *	    { u64 id; } && PERF_FORMAT_ID
 *	  } cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
	PERF_FORMAT_ID = 1U << 2,
	PERF_FORMAT_GROUP = 1U << 3,

	PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
};

#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1	72	/* add: config2 */
#define PERF_ATTR_SIZE_VER2	80	/* add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3	96	/* add: sample_regs_user */
					/* add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4	104	/* add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5	112	/* add: aux_watermark */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 *
 * @sample_max_stack: Max number of frame pointers in a callchain,
 *		      should be < /proc/sys/kernel/perf_event_max_stack
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32 type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32 size;

	/*
	 * Type specific configuration information.
	 */
	__u64 config;

	union {
		__u64 sample_period;
		__u64 sample_freq;
	};

	__u64 sample_type;
	__u64 read_format;

	__u64 disabled : 1, /* off by default */
	      inherit : 1, /* children inherit it */
	      pinned : 1, /* must always be on PMU */
	      exclusive : 1, /* only group on PMU */
	      exclude_user : 1, /* don't count user */
	      exclude_kernel : 1, /* ditto kernel */
	      exclude_hv : 1, /* ditto hypervisor */
	      exclude_idle : 1, /* don't count when idle */
	      mmap : 1, /* include mmap data */
	      comm : 1, /* include comm data */
	      freq : 1, /* use freq, not period */
	      inherit_stat : 1, /* per task counts */
	      enable_on_exec : 1, /* next exec enables */
	      task : 1, /* trace fork/exit */
	      watermark : 1, /* wakeup_watermark */
	      /*
	       * precise_ip:
	       *
	       *  0 - SAMPLE_IP can have arbitrary skid
	       *  1 - SAMPLE_IP must have constant skid
	       *  2 - SAMPLE_IP requested to have 0 skid
	       *  3 - SAMPLE_IP must have 0 skid
	       *
	       *  See also PERF_RECORD_MISC_EXACT_IP
	       */
	      precise_ip : 2, /* skid constraint */
	      mmap_data : 1, /* non-exec mmap data */
	      sample_id_all : 1, /* sample_type all events */

	      exclude_host : 1, /* don't count in host */
	      exclude_guest : 1, /* don't count in guest */

	      exclude_callchain_kernel : 1, /* exclude kernel callchains */
	      exclude_callchain_user : 1, /* exclude user callchains */
	      mmap2 : 1, /* include mmap with inode data */
	      comm_exec : 1, /* flag comm events that are due to an exec */
	      use_clockid : 1, /* use @clockid for time fields */
	      context_switch : 1, /* context switch data */
	      write_backward : 1, /* Write ring buffer from end to beginning */
	      __reserved_1 : 36;

	union {
		__u32 wakeup_events; /* wakeup every n events */
		__u32 wakeup_watermark; /* bytes before wakeup */
	};

	__u32 bp_type;
	union {
		__u64 bp_addr;
		__u64 config1; /* extension of config */
	};
	union {
		__u64 bp_len;
		__u64 config2; /* extension of config1 */
	};
	__u64 branch_sample_type; /* enum perf_branch_sample_type */

	/*
	 * Defines set of user regs to dump on samples.
	 * See asm/perf_regs.h for details.
	 */
	__u64 sample_regs_user;

	/*
	 * Defines size of the user stack to dump on samples.
	 */
	__u32 sample_stack_user;

	__s32 clockid;
	/*
	 * Defines set of regs to dump for each sample
	 * state captured on:
	 *  - precise = 0: PMU interrupt
	 *  - precise > 0: sampled instruction
	 *
	 * See asm/perf_regs.h for details.
	 */
	__u64 sample_regs_intr;

	/*
	 * Wakeup watermark for AUX area
	 */
	__u32 aux_watermark;
	__u16 sample_max_stack;
	__u16 __reserved_2; /* align to __u64 */
};
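An illustrative sketch, not part of the patch, of filling this struct for the
common case: counting CPU cycles for the calling thread. There is no libc
wrapper for perf_event_open(2), so raw syscall(2) is the usual idiom (error
handling omitted):

	/* illustrative only */
	#include <linux/perf_event.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	static int open_cycles_counter(void)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_HARDWARE;
		attr.size = sizeof(attr);
		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		attr.disabled = 1;
		attr.exclude_kernel = 1;

		/* pid = 0 (self), cpu = -1 (any), group_fd = -1, flags = 0 */
		return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	}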

#define perf_flags(attr)	(*(&(attr)->read_format + 1))

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID		_IOR('$', 7, __u64 *)
#define PERF_EVENT_IOC_SET_BPF		_IOW('$', 8, __u32)
#define PERF_EVENT_IOC_PAUSE_OUTPUT	_IOW('$', 9, __u32)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP = 1U << 0,
};

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32 version; /* version number of this structure */
	__u32 compat_version; /* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq, time_mult, time_shift, index, width;
	 *   u64 count, enabled, running;
	 *   u64 cyc, time_offset;
	 *   s64 pmc = 0;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *     barrier()
	 *
	 *     enabled = pc->time_enabled;
	 *     running = pc->time_running;
	 *
	 *     if (pc->cap_usr_time && enabled != running) {
	 *       cyc = rdtsc();
	 *       time_offset = pc->time_offset;
	 *       time_mult = pc->time_mult;
	 *       time_shift = pc->time_shift;
	 *     }
	 *
	 *     index = pc->index;
	 *     count = pc->offset;
	 *     if (pc->cap_user_rdpmc && index) {
	 *       width = pc->pmc_width;
	 *       pmc = rdpmc(index - 1);
	 *     }
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reason this only works on self-monitoring
	 *       processes.
	 */
	__u32 lock; /* seqlock for synchronization */
	__u32 index; /* hardware event identifier */
	__s64 offset; /* add to hardware event value */
	__u64 time_enabled; /* time event active */
	__u64 time_running; /* time event on cpu */
	union {
		__u64 capabilities;
		struct {
			__u64 cap_bit0 : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
			      cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */

			      cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */
			      cap_user_time : 1, /* The time_* fields are used */
			      cap_user_time_zero : 1, /* The time_zero field is used */
			      cap_____res : 59;
		};
	};

	/*
	 * If cap_user_rdpmc this field provides the bit-width of the value
	 * read using the rdpmc() or equivalent instruction. This can be used
	 * to sign extend the result like:
	 *
	 *   pmc <<= 64 - width;
	 *   pmc >>= 64 - width; // signed shift right
	 *   count += pmc;
	 */
	__u16 pmc_width;

	/*
	 * If cap_usr_time the below fields can be used to compute the time
	 * delta since time_enabled (in ns) using rdtsc or similar.
	 *
	 *   u64 quot, rem;
	 *   u64 delta;
	 *
	 *   quot = (cyc >> time_shift);
	 *   rem = cyc & (((u64)1 << time_shift) - 1);
	 *   delta = time_offset + quot * time_mult +
	 *           ((rem * time_mult) >> time_shift);
	 *
	 * Where time_offset,time_mult,time_shift and cyc are read in the
	 * seqcount loop described above. This delta can then be added to
	 * enabled and possible running (if index), improving the scaling:
	 *
	 *   enabled += delta;
	 *   if (index)
	 *     running += delta;
	 *
	 *   quot = count / running;
	 *   rem = count % running;
	 *   count = quot * enabled + (rem * enabled) / running;
	 */
	__u16 time_shift;
	__u32 time_mult;
	__u64 time_offset;
	/*
	 * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated
	 * from sample timestamps.
	 *
	 *   time = timestamp - time_zero;
	 *   quot = time / time_mult;
	 *   rem = time % time_mult;
	 *   cyc = (quot << time_shift) + (rem << time_shift) / time_mult;
	 *
	 * And vice versa:
	 *
	 *   quot = cyc >> time_shift;
	 *   rem = cyc & (((u64)1 << time_shift) - 1);
	 *   timestamp = time_zero + quot * time_mult +
	 *               ((rem * time_mult) >> time_shift);
	 */
	__u64 time_zero;
	__u32 size; /* Header size up to __reserved[] fields. */

	/*
	 * Hole for extension of the self monitor capabilities
	 */

	__u8 __reserved[118*8+4]; /* align to 1k. */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an smp_rmb(),
	 * after reading this value.
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data, after issuing
	 * an smp_mb() to separate the data read from the ->data_tail store.
	 * In this case the kernel will not over-write unread data.
	 *
	 * See perf_output_put_handle() for the data ordering.
	 *
	 * data_{offset,size} indicate the location and size of the perf record
	 * buffer within the mmapped area.
	 */
	__u64 data_head; /* head in the data section */
	__u64 data_tail; /* user-space written tail */
	__u64 data_offset; /* where the buffer starts */
	__u64 data_size; /* data buffer size */

	/*
	 * AUX area is defined by aux_{offset,size} fields that should be set
	 * by the userspace, so that
	 *
	 *   aux_offset >= data_offset + data_size
	 *
	 * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
	 *
	 * Ring buffer pointers aux_{head,tail} have the same semantics as
	 * data_{head,tail} and same ordering rules apply.
	 */
	__u64 aux_head;
	__u64 aux_tail;
	__u64 aux_offset;
	__u64 aux_size;
};

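For convenience, the cycles-to-nanoseconds conversion documented in the
comment above, written out as a plain function; an illustrative sketch, not
part of the patch, whose inputs are the fields read inside the seqlock loop:

	/* illustrative only */
	#include <stdint.h>

	static uint64_t cycles_to_ns(uint64_t cyc, uint64_t time_offset,
				     uint32_t time_mult, uint16_t time_shift)
	{
		uint64_t quot = cyc >> time_shift;
		uint64_t rem = cyc & (((uint64_t)1 << time_shift) - 1);

		return time_offset + quot * time_mult +
		       ((rem * time_mult) >> time_shift);
	}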
#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

/*
 * Indicates that /proc/PID/maps parsing was truncated by a timeout.
 */
#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT	(1 << 12)
/*
 * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
 * different events so can reuse the same bit position.
 * Ditto PERF_RECORD_MISC_SWITCH_OUT.
 */
#define PERF_RECORD_MISC_MMAP_DATA		(1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC		(1 << 13)
#define PERF_RECORD_MISC_SWITCH_OUT		(1 << 13)
/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

struct perf_event_header {
	__u32 type;
	__u16 misc;
	__u16 size;
};

enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * have the sample_type selected fields related to where/when
	 * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
	 * IDENTIFIER) described in PERF_RECORD_SAMPLE below, it will be stashed
	 * just after the perf_event_header and the fields already present for
	 * the existing fields, i.e. at the end of the payload. That way a newer
	 * perf.data file will be supported by older perf tools, with these new
	 * optional fields being ignored.
	 *
	 * struct sample_id {
	 *	{ u32 pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64 time; } && PERF_SAMPLE_TIME
	 *	{ u64 id; } && PERF_SAMPLE_ID
	 *	{ u64 stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32 cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64 id; } && PERF_SAMPLE_IDENTIFIER
	 * } && perf_event_attr::sample_id_all
	 *
	 * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
	 * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
	 * relative to header.size.
	 */

	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *
	 *	u32 pid, tid;
	 *	u64 addr;
	 *	u64 len;
	 *	u64 pgoff;
	 *	char filename[];
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_MMAP = 1,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u64 id;
	 *	u64 lost;
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_LOST = 2,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *
	 *	u32 pid, tid;
	 *	char comm[];
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_COMM = 3,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u32 pid, ppid;
	 *	u32 tid, ptid;
	 *	u64 time;
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_EXIT = 4,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u64 time;
	 *	u64 id;
	 *	u64 stream_id;
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_THROTTLE = 5,
	PERF_RECORD_UNTHROTTLE = 6,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u32 pid, ppid;
	 *	u32 tid, ptid;
	 *	u64 time;
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_FORK = 7,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *	u32 pid, tid;
	 *
	 *	struct read_format values;
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_READ = 8,

	/*
	 * struct {
	 *	struct perf_event_header header;
	 *
	 *	#
	 *	# Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
	 *	# The advantage of PERF_SAMPLE_IDENTIFIER is that its position
	 *	# is fixed relative to header.
	 *	#
	 *
	 *	{ u64 id; } && PERF_SAMPLE_IDENTIFIER
	 *	{ u64 ip; } && PERF_SAMPLE_IP
	 *	{ u32 pid, tid; } && PERF_SAMPLE_TID
	 *	{ u64 time; } && PERF_SAMPLE_TIME
	 *	{ u64 addr; } && PERF_SAMPLE_ADDR
	 *	{ u64 id; } && PERF_SAMPLE_ID
	 *	{ u64 stream_id;} && PERF_SAMPLE_STREAM_ID
	 *	{ u32 cpu, res; } && PERF_SAMPLE_CPU
	 *	{ u64 period; } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format values; } && PERF_SAMPLE_READ
	 *
	 *	{ u64 nr,
	 *	  u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt to
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32 size;
	 *	  char data[size];}&& PERF_SAMPLE_RAW
	 *
	 *	{ u64 nr;
	 *	  { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
	 *
	 *	{ u64 abi; # enum perf_sample_regs_abi
	 *	  u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
	 *
	 *	{ u64 size;
	 *	  char data[size];
	 *	  u64 dyn_size; } && PERF_SAMPLE_STACK_USER
	 *
	 *	{ u64 weight; } && PERF_SAMPLE_WEIGHT
	 *	{ u64 data_src; } && PERF_SAMPLE_DATA_SRC
	 *	{ u64 transaction; } && PERF_SAMPLE_TRANSACTION
	 *	{ u64 abi; # enum perf_sample_regs_abi
	 *	  u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
	 * };
	 */
	PERF_RECORD_SAMPLE = 9,

	/*
	 * The MMAP2 records are an augmented version of MMAP, they add
	 * maj, min, ino numbers to be used to uniquely identify each mapping
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *
	 *	u32 pid, tid;
	 *	u64 addr;
	 *	u64 len;
	 *	u64 pgoff;
	 *	u32 maj;
	 *	u32 min;
	 *	u64 ino;
	 *	u64 ino_generation;
	 *	u32 prot, flags;
	 *	char filename[];
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_MMAP2 = 10,

	/*
	 * Records that new data landed in the AUX buffer part.
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *
	 *	u64 aux_offset;
	 *	u64 aux_size;
	 *	u64 flags;
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_AUX = 11,

	/*
	 * Indicates that instruction trace has started
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *	u32 pid;
	 *	u32 tid;
	 * };
	 */
	PERF_RECORD_ITRACE_START = 12,

	/*
	 * Records the dropped/lost sample number.
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *
	 *	u64 lost;
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_LOST_SAMPLES = 13,

	/*
	 * Records a context switch in or out (flagged by
	 * PERF_RECORD_MISC_SWITCH_OUT). See also
	 * PERF_RECORD_SWITCH_CPU_WIDE.
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_SWITCH = 14,

	/*
	 * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
	 * next_prev_tid that are the next (switching out) or previous
	 * (switching in) pid/tid.
	 *
	 * struct {
	 *	struct perf_event_header header;
	 *	u32 next_prev_pid;
	 *	u32 next_prev_tid;
	 *	struct sample_id sample_id;
	 * };
	 */
	PERF_RECORD_SWITCH_CPU_WIDE = 15,

	PERF_RECORD_MAX, /* non-ABI */
};

#define PERF_MAX_STACK_DEPTH		127
#define PERF_MAX_CONTEXTS_PER_STACK	8

enum perf_callchain_context {
	PERF_CONTEXT_HV = (__u64)-32,
	PERF_CONTEXT_KERNEL = (__u64)-128,
	PERF_CONTEXT_USER = (__u64)-512,

	PERF_CONTEXT_GUEST = (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
	PERF_CONTEXT_GUEST_USER = (__u64)-2560,

	PERF_CONTEXT_MAX = (__u64)-4095,
};

/**
 * PERF_RECORD_AUX::flags bits
 */
#define PERF_AUX_FLAG_TRUNCATED	0x01 /* record was truncated to fit */
#define PERF_AUX_FLAG_OVERWRITE	0x02 /* snapshot from overwrite mode */

#define PERF_FLAG_FD_NO_GROUP		(1UL << 0)
#define PERF_FLAG_FD_OUTPUT		(1UL << 1)
#define PERF_FLAG_PID_CGROUP		(1UL << 2) /* pid=cgroup id, per-cpu mode only */
#define PERF_FLAG_FD_CLOEXEC		(1UL << 3) /* O_CLOEXEC */

union perf_mem_data_src {
	__u64 val;
	struct {
		__u64 mem_op:5, /* type of opcode */
		      mem_lvl:14, /* memory hierarchy level */
		      mem_snoop:5, /* snoop mode */
		      mem_lock:2, /* lock instr */
		      mem_dtlb:7, /* tlb access */
		      mem_rsvd:31;
	};
};

/* type of opcode (load/store/prefetch,code) */
#define PERF_MEM_OP_NA		0x01 /* not available */
#define PERF_MEM_OP_LOAD	0x02 /* load instruction */
#define PERF_MEM_OP_STORE	0x04 /* store instruction */
#define PERF_MEM_OP_PFETCH	0x08 /* prefetch */
#define PERF_MEM_OP_EXEC	0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT	0

/* memory hierarchy (memory level, hit or miss) */
#define PERF_MEM_LVL_NA		0x01 /* not available */
#define PERF_MEM_LVL_HIT	0x02 /* hit level */
#define PERF_MEM_LVL_MISS	0x04 /* miss level */
#define PERF_MEM_LVL_L1		0x08 /* L1 */
#define PERF_MEM_LVL_LFB	0x10 /* Line Fill Buffer */
#define PERF_MEM_LVL_L2		0x20 /* L2 */
#define PERF_MEM_LVL_L3		0x40 /* L3 */
#define PERF_MEM_LVL_LOC_RAM	0x80 /* Local DRAM */
#define PERF_MEM_LVL_REM_RAM1	0x100 /* Remote DRAM (1 hop) */
#define PERF_MEM_LVL_REM_RAM2	0x200 /* Remote DRAM (2 hops) */
#define PERF_MEM_LVL_REM_CCE1	0x400 /* Remote Cache (1 hop) */
#define PERF_MEM_LVL_REM_CCE2	0x800 /* Remote Cache (2 hops) */
#define PERF_MEM_LVL_IO		0x1000 /* I/O memory */
#define PERF_MEM_LVL_UNC	0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT	5

/* snoop mode */
#define PERF_MEM_SNOOP_NA	0x01 /* not available */
#define PERF_MEM_SNOOP_NONE	0x02 /* no snoop */
#define PERF_MEM_SNOOP_HIT	0x04 /* snoop hit */
#define PERF_MEM_SNOOP_MISS	0x08 /* snoop miss */
#define PERF_MEM_SNOOP_HITM	0x10 /* snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT	19

/* locked instruction */
|
||||
#define PERF_MEM_LOCK_NA 0x01 /* not available */
|
||||
#define PERF_MEM_LOCK_LOCKED 0x02 /* locked transaction */
|
||||
#define PERF_MEM_LOCK_SHIFT 24
|
||||
|
||||
/* TLB access */
|
||||
#define PERF_MEM_TLB_NA 0x01 /* not available */
|
||||
#define PERF_MEM_TLB_HIT 0x02 /* hit level */
|
||||
#define PERF_MEM_TLB_MISS 0x04 /* miss level */
|
||||
#define PERF_MEM_TLB_L1 0x08 /* L1 */
|
||||
#define PERF_MEM_TLB_L2 0x10 /* L2 */
|
||||
#define PERF_MEM_TLB_WK 0x20 /* Hardware Walker*/
|
||||
#define PERF_MEM_TLB_OS 0x40 /* OS fault handler */
|
||||
#define PERF_MEM_TLB_SHIFT 26
|
||||
|
||||
#define PERF_MEM_S(a, s) \
|
||||
(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
|
||||
|
||||
/*
|
||||
* single taken branch record layout:
|
||||
*
|
||||
* from: source instruction (may not always be a branch insn)
|
||||
* to: branch target
|
||||
* mispred: branch target was mispredicted
|
||||
* predicted: branch target was predicted
|
||||
*
|
||||
* support for mispred, predicted is optional. In case it
|
||||
* is not supported mispred = predicted = 0.
|
||||
*
|
||||
* in_tx: running in a hardware transaction
|
||||
* abort: aborting a hardware transaction
|
||||
* cycles: cycles from last branch (or 0 if not supported)
|
||||
*/
|
||||
struct perf_branch_entry {
|
||||
__u64 from;
|
||||
__u64 to;
|
||||
__u64 mispred:1, /* target mispredicted */
|
||||
predicted:1,/* target predicted */
|
||||
in_tx:1, /* in transaction */
|
||||
abort:1, /* transaction abort */
|
||||
cycles:16, /* cycle count to last branch */
|
||||
reserved:44;
|
||||
};
|
||||
|
||||
#endif /* _UAPI_LINUX_PERF_EVENT_H */
|
|
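The PERF_MEM_* encoding above is compact enough that a small decoder is worth sketching. The following user-space snippet is a hypothetical illustration, not part of this merge: it composes a PERF_SAMPLE_DATA_SRC value with PERF_MEM_S() exactly the way a PMU driver would, then picks it apart again through the union's bitfields. It relies only on the uapi header shown above.

    #include <stdio.h>
    #include <linux/perf_event.h>	/* the uapi header above */

    /* decode the fields a PERF_SAMPLE_DATA_SRC sample carries */
    static void decode_data_src(__u64 val)
    {
            union perf_mem_data_src d = { .val = val };

            if (d.mem_op & PERF_MEM_OP_LOAD)
                    printf("load ");
            if (d.mem_op & PERF_MEM_OP_STORE)
                    printf("store ");
            if (d.mem_lvl & PERF_MEM_LVL_MISS)
                    printf("miss%s", (d.mem_lvl & PERF_MEM_LVL_L1) ? " in L1" : "");
            else if (d.mem_lvl & PERF_MEM_LVL_HIT)
                    printf("hit%s", (d.mem_lvl & PERF_MEM_LVL_LFB) ? " in LFB" : "");
            putchar('\n');
    }

    int main(void)
    {
            /* compose: each field's flag value shifted into its slot */
            __u64 val = PERF_MEM_S(OP, LOAD) |
                        PERF_MEM_S(LVL, MISS) |
                        PERF_MEM_S(LVL, L1);

            decode_data_src(val);	/* prints: load miss in L1 */
            return 0;
    }
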
@@ -10,15 +10,23 @@ endif

 CC = $(CROSS_COMPILE)gcc
 AR = $(CROSS_COMPILE)ar
 LD = $(CROSS_COMPILE)ld

 MAKEFLAGS += --no-print-directory

 LIBFILE = $(OUTPUT)libapi.a

 CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
-CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
+CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
+
+# Treat warnings as errors unless directed not to
+ifneq ($(WERROR),0)
+  CFLAGS += -Werror
+endif

 CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
 CFLAGS += -I$(srctree)/tools/lib/api
 CFLAGS += -I$(srctree)/tools/include

 RM = rm -f

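This hunk (and the identical libsubcmd one further down) keeps -Werror as the default, but a build tripping over new warnings on a newer compiler can presumably proceed with something like `make WERROR=0` instead of requiring the Makefile to be patched.
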
@@ -85,7 +85,8 @@ int fdarray__add(struct fdarray *fda, int fd, short revents)
 }

 int fdarray__filter(struct fdarray *fda, short revents,
-		    void (*entry_destructor)(struct fdarray *fda, int fd))
+		    void (*entry_destructor)(struct fdarray *fda, int fd, void *arg),
+		    void *arg)
 {
 	int fd, nr = 0;

@@ -95,7 +96,7 @@ int fdarray__filter(struct fdarray *fda, short revents,
 	for (fd = 0; fd < fda->nr; ++fd) {
 		if (fda->entries[fd].revents & revents) {
 			if (entry_destructor)
-				entry_destructor(fda, fd);
+				entry_destructor(fda, fd, arg);

 			continue;
 		}

@@ -22,6 +22,7 @@ struct fdarray {
 	struct pollfd *entries;
 	union {
 		int    idx;
+		void   *ptr;
 	} *priv;
 };

@@ -34,7 +35,8 @@ void fdarray__delete(struct fdarray *fda);
 int fdarray__add(struct fdarray *fda, int fd, short revents);
 int fdarray__poll(struct fdarray *fda, int timeout);
 int fdarray__filter(struct fdarray *fda, short revents,
-		    void (*entry_destructor)(struct fdarray *fda, int fd));
+		    void (*entry_destructor)(struct fdarray *fda, int fd, void *arg),
+		    void *arg);
 int fdarray__grow(struct fdarray *fda, int extra);
 int fdarray__fprintf(struct fdarray *fda, FILE *fp);

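The extra void *arg threads caller state through to the destructor, so per-call state no longer has to live in globals. A minimal sketch of a caller on the new signature; the counting struct is made up for illustration:

    #include <poll.h>
    #include <unistd.h>
    #include <api/fd/array.h>	/* tools/lib/api */

    struct reap_stats { int nr_closed; };	/* hypothetical caller state */

    /* the destructor now receives the caller-provided arg */
    static void close_on_hup(struct fdarray *fda, int fd, void *arg)
    {
            struct reap_stats *stats = arg;

            close(fda->entries[fd].fd);
            stats->nr_closed++;
    }

    int poll_and_reap(struct fdarray *fda)
    {
            struct reap_stats stats = { 0 };

            fdarray__poll(fda, -1);
            /* drop the entries that saw POLLHUP, closing their fds on the way out */
            return fdarray__filter(fda, POLLHUP, close_on_hup, &stats);
    }
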
@@ -283,6 +283,11 @@ int filename__read_int(const char *filename, int *value)
 	return err;
 }

+/*
+ * Parses @value out of @filename with strtoull.
+ * By using 0 for base, the strtoull detects the
+ * base automatically (see man strtoull).
+ */
 int filename__read_ull(const char *filename, unsigned long long *value)
 {
 	char line[64];

@@ -292,7 +297,7 @@ int filename__read_ull(const char *filename, unsigned long long *value)
 		return -1;

 	if (read(fd, line, sizeof(line)) > 0) {
-		*value = strtoull(line, NULL, 10);
+		*value = strtoull(line, NULL, 0);
 		if (*value != ULLONG_MAX)
 			err = 0;
 	}

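The base-0 behavior the new comment documents is easy to verify in isolation. A throwaway snippet, not part of the patch, showing all three radixes collapsing to the same value:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            /* base 0 = auto-detect: leading "0x" is hex, leading "0" is octal */
            printf("%llu\n", strtoull("42",   NULL, 0));	/* 42 */
            printf("%llu\n", strtoull("0x2a", NULL, 0));	/* 42 */
            printf("%llu\n", strtoull("052",  NULL, 0));	/* 42 */
            return 0;
    }
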
@@ -5,6 +5,7 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <linux/string.h>
 #include <errno.h>
 #include <unistd.h>
 #include "fs.h"

@@ -118,7 +119,7 @@ static int strerror_open(int err, char *buf, size_t size, const char *filename)
 		}
 		break;
 	default:
-		snprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
+		snprintf(buf, size, "%s", str_error_r(err, sbuf, sizeof(sbuf)));
 		break;
 	}

@@ -68,7 +68,7 @@ FEATURE_USER = .libbpf
 FEATURE_TESTS = libelf libelf-getphdrnum libelf-mmap bpf
 FEATURE_DISPLAY = libelf bpf

-INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/arch/$(ARCH)/include/uapi -I$(srctree)/include/uapi
+INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi
 FEATURE_CHECK_CFLAGS-bpf = $(INCLUDES)

 check_feat := 1

@@ -154,6 +154,12 @@ all: fixdep $(VERSION_FILES) all_cmd
 all_cmd: $(CMD_TARGETS)

 $(BPF_IN): force elfdep bpfdep
+	@(test -f ../../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \
+	(diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \
+	echo "Warning: tools/include/uapi/linux/bpf.h differs from kernel" >&2 )) || true
+	@(test -f ../../../include/uapi/linux/bpf_common.h -a -f ../../../include/uapi/linux/bpf_common.h && ( \
+	(diff -B ../../include/uapi/linux/bpf_common.h ../../../include/uapi/linux/bpf_common.h >/dev/null) || \
+	echo "Warning: tools/include/uapi/linux/bpf_common.h differs from kernel" >&2 )) || true
 	$(Q)$(MAKE) $(build)=libbpf

 $(OUTPUT)libbpf.so: $(BPF_IN)

@@ -4,6 +4,19 @@
  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
  * Copyright (C) 2015 Huawei Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses>
  */

 #include <stdlib.h>

@@ -4,6 +4,19 @@
  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
  * Copyright (C) 2015 Huawei Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses>
  */
 #ifndef __BPF_BPF_H
 #define __BPF_BPF_H

@@ -4,6 +4,19 @@
  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
  * Copyright (C) 2015 Huawei Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses>
  */

 #include <stdlib.h>

@@ -71,12 +84,13 @@ static const char *libbpf_strerror_table[NR_ERRNO] = {
 	[ERRCODE_OFFSET(LIBELF)]	= "Something wrong in libelf",
 	[ERRCODE_OFFSET(FORMAT)]	= "BPF object format invalid",
 	[ERRCODE_OFFSET(KVERSION)]	= "'version' section incorrect or lost",
-	[ERRCODE_OFFSET(ENDIAN)]	= "Endian missmatch",
+	[ERRCODE_OFFSET(ENDIAN)]	= "Endian mismatch",
 	[ERRCODE_OFFSET(INTERNAL)]	= "Internal error in libbpf",
 	[ERRCODE_OFFSET(RELOC)]		= "Relocation failed",
 	[ERRCODE_OFFSET(VERIFY)]	= "Kernel verifier blocks program loading",
 	[ERRCODE_OFFSET(PROG2BIG)]	= "Program too big",
 	[ERRCODE_OFFSET(KVER)]		= "Incorrect kernel version",
+	[ERRCODE_OFFSET(PROGTYPE)]	= "Kernel doesn't support this program type",
 };

 int libbpf_strerror(int err, char *buf, size_t size)

@@ -145,6 +159,7 @@ struct bpf_program {
 	char *section_name;
 	struct bpf_insn *insns;
 	size_t insns_cnt;
+	enum bpf_prog_type type;

 	struct {
 		int insn_idx;

@@ -286,6 +301,7 @@ bpf_program__init(void *data, size_t size, char *name, int idx,
 	prog->idx = idx;
 	prog->instances.fds = NULL;
 	prog->instances.nr = -1;
+	prog->type = BPF_PROG_TYPE_KPROBE;

 	return 0;
 errout:

@@ -881,8 +897,8 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
 }

 static int
-load_program(struct bpf_insn *insns, int insns_cnt,
-	     char *license, u32 kern_version, int *pfd)
+load_program(enum bpf_prog_type type, struct bpf_insn *insns,
+	     int insns_cnt, char *license, u32 kern_version, int *pfd)
 {
 	int ret;
 	char *log_buf;

@@ -894,9 +910,8 @@ load_program(struct bpf_insn *insns, int insns_cnt,
 	if (!log_buf)
 		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

-	ret = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
-			       insns_cnt, license, kern_version,
-			       log_buf, BPF_LOG_BUF_SIZE);
+	ret = bpf_load_program(type, insns, insns_cnt, license,
+			       kern_version, log_buf, BPF_LOG_BUF_SIZE);

 	if (ret >= 0) {
 		*pfd = ret;

@@ -912,15 +927,27 @@ load_program(struct bpf_insn *insns, int insns_cnt,
 		pr_warning("-- BEGIN DUMP LOG ---\n");
 		pr_warning("\n%s\n", log_buf);
 		pr_warning("-- END LOG --\n");
-	} else if (insns_cnt >= BPF_MAXINSNS) {
-		pr_warning("Program too large (%d insns), at most %d insns\n",
-			   insns_cnt, BPF_MAXINSNS);
-		ret = -LIBBPF_ERRNO__PROG2BIG;
-	} else if (log_buf) {
-		pr_warning("log buffer is empty\n");
-		ret = -LIBBPF_ERRNO__KVER;
+	} else {
+		if (insns_cnt >= BPF_MAXINSNS) {
+			pr_warning("Program too large (%d insns), at most %d insns\n",
+				   insns_cnt, BPF_MAXINSNS);
+			ret = -LIBBPF_ERRNO__PROG2BIG;
+		} else {
+			/* Wrong program type? */
+			if (type != BPF_PROG_TYPE_KPROBE) {
+				int fd;
+
+				fd = bpf_load_program(BPF_PROG_TYPE_KPROBE, insns,
+						      insns_cnt, license, kern_version,
+						      NULL, 0);
+				if (fd >= 0) {
+					close(fd);
+					ret = -LIBBPF_ERRNO__PROGTYPE;
+					goto out;
+				}
+			}
+
+			if (log_buf)
+				ret = -LIBBPF_ERRNO__KVER;
+		}
 	}

 out:

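The retry with BPF_PROG_TYPE_KPROBE acts as a probe: when a load fails without producing a verifier log, the same instructions are loaded once more under the old hard-wired type. If that succeeds, the program itself is fine and the kernel simply lacks support for the requested type, so the more precise LIBBPF_ERRNO__PROGTYPE is reported instead of the catch-all kernel-version error.
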
@@ -955,7 +982,7 @@ bpf_program__load(struct bpf_program *prog,
 			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
 				   prog->section_name, prog->instances.nr);
 		}
-		err = load_program(prog->insns, prog->insns_cnt,
+		err = load_program(prog->type, prog->insns, prog->insns_cnt,
 				   license, kern_version, &fd);
 		if (!err)
 			prog->instances.fds[0] = fd;

@@ -984,7 +1011,7 @@ bpf_program__load(struct bpf_program *prog,
 			continue;
 		}

-		err = load_program(result.new_insn_ptr,
+		err = load_program(prog->type, result.new_insn_ptr,
 				   result.new_insn_cnt,
 				   license, kern_version, &fd);

@@ -1186,20 +1213,14 @@ bpf_object__next(struct bpf_object *prev)
 	return next;
 }

-const char *
-bpf_object__get_name(struct bpf_object *obj)
+const char *bpf_object__name(struct bpf_object *obj)
 {
-	if (!obj)
-		return ERR_PTR(-EINVAL);
-	return obj->path;
+	return obj ? obj->path : ERR_PTR(-EINVAL);
 }

-unsigned int
-bpf_object__get_kversion(struct bpf_object *obj)
+unsigned int bpf_object__kversion(struct bpf_object *obj)
 {
-	if (!obj)
-		return 0;
-	return obj->kern_version;
+	return obj ? obj->kern_version : 0;
 }

 struct bpf_program *

@@ -1224,9 +1245,8 @@ bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
 	return &obj->programs[idx];
 }

-int bpf_program__set_private(struct bpf_program *prog,
-			     void *priv,
-			     bpf_program_clear_priv_t clear_priv)
+int bpf_program__set_priv(struct bpf_program *prog, void *priv,
+			  bpf_program_clear_priv_t clear_priv)
 {
 	if (prog->priv && prog->clear_priv)
 		prog->clear_priv(prog, prog->priv);

@@ -1236,10 +1256,9 @@ int bpf_program__set_private(struct bpf_program *prog,
 	return 0;
 }

-int bpf_program__get_private(struct bpf_program *prog, void **ppriv)
+void *bpf_program__priv(struct bpf_program *prog)
 {
-	*ppriv = prog->priv;
-	return 0;
+	return prog ? prog->priv : ERR_PTR(-EINVAL);
 }

 const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)

@@ -1311,32 +1330,61 @@ int bpf_program__nth_fd(struct bpf_program *prog, int n)
 	return fd;
 }

-int bpf_map__get_fd(struct bpf_map *map)
+static void bpf_program__set_type(struct bpf_program *prog,
+				  enum bpf_prog_type type)
 {
-	if (!map)
-		return -EINVAL;
-
-	return map->fd;
+	prog->type = type;
 }

-int bpf_map__get_def(struct bpf_map *map, struct bpf_map_def *pdef)
+int bpf_program__set_tracepoint(struct bpf_program *prog)
 {
-	if (!map || !pdef)
+	if (!prog)
 		return -EINVAL;
-
-	*pdef = map->def;
+	bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
 	return 0;
 }

-const char *bpf_map__get_name(struct bpf_map *map)
+int bpf_program__set_kprobe(struct bpf_program *prog)
 {
-	if (!map)
-		return NULL;
-	return map->name;
+	if (!prog)
+		return -EINVAL;
+	bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);
+	return 0;
 }

-int bpf_map__set_private(struct bpf_map *map, void *priv,
-			 bpf_map_clear_priv_t clear_priv)
+static bool bpf_program__is_type(struct bpf_program *prog,
+				 enum bpf_prog_type type)
+{
+	return prog ? (prog->type == type) : false;
+}
+
+bool bpf_program__is_tracepoint(struct bpf_program *prog)
+{
+	return bpf_program__is_type(prog, BPF_PROG_TYPE_TRACEPOINT);
+}
+
+bool bpf_program__is_kprobe(struct bpf_program *prog)
+{
+	return bpf_program__is_type(prog, BPF_PROG_TYPE_KPROBE);
+}
+
+int bpf_map__fd(struct bpf_map *map)
+{
+	return map ? map->fd : -EINVAL;
+}
+
+const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
+{
+	return map ? &map->def : ERR_PTR(-EINVAL);
+}
+
+const char *bpf_map__name(struct bpf_map *map)
+{
+	return map ? map->name : NULL;
+}
+
+int bpf_map__set_priv(struct bpf_map *map, void *priv,
+		      bpf_map_clear_priv_t clear_priv)
 {
 	if (!map)
 		return -EINVAL;

@@ -1351,14 +1399,9 @@ int bpf_map__set_private(struct bpf_map *map, void *priv,
 	return 0;
 }

-int bpf_map__get_private(struct bpf_map *map, void **ppriv)
+void *bpf_map__priv(struct bpf_map *map)
 {
-	if (!map)
-		return -EINVAL;
-
-	if (ppriv)
-		*ppriv = map->priv;
-	return 0;
+	return map ? map->priv : ERR_PTR(-EINVAL);
 }

 struct bpf_map *

@@ -1389,7 +1432,7 @@ bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
 }

 struct bpf_map *
-bpf_object__get_map_by_name(struct bpf_object *obj, const char *name)
+bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
 {
 	struct bpf_map *pos;

@@ -4,6 +4,19 @@
  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
  * Copyright (C) 2015 Huawei Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation;
+ * version 2.1 of the License (not later!)
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this program; if not, see <http://www.gnu.org/licenses>
  */
 #ifndef __BPF_LIBBPF_H
 #define __BPF_LIBBPF_H

@@ -19,13 +32,14 @@ enum libbpf_errno {
 	LIBBPF_ERRNO__LIBELF = __LIBBPF_ERRNO__START,
 	LIBBPF_ERRNO__FORMAT,	/* BPF object format invalid */
 	LIBBPF_ERRNO__KVERSION,	/* Incorrect or no 'version' section */
-	LIBBPF_ERRNO__ENDIAN,	/* Endian missmatch */
+	LIBBPF_ERRNO__ENDIAN,	/* Endian mismatch */
 	LIBBPF_ERRNO__INTERNAL,	/* Internal error in libbpf */
 	LIBBPF_ERRNO__RELOC,	/* Relocation failed */
 	LIBBPF_ERRNO__LOAD,	/* Load program failure for unknown reason */
 	LIBBPF_ERRNO__VERIFY,	/* Kernel verifier blocks program loading */
 	LIBBPF_ERRNO__PROG2BIG,	/* Program too big */
 	LIBBPF_ERRNO__KVER,	/* Incorrect kernel version */
+	LIBBPF_ERRNO__PROGTYPE,	/* Kernel doesn't support this program type */
 	__LIBBPF_ERRNO__END,
 };

@@ -55,8 +69,8 @@ void bpf_object__close(struct bpf_object *object);
 /* Load/unload object into/from kernel */
 int bpf_object__load(struct bpf_object *obj);
 int bpf_object__unload(struct bpf_object *obj);
-const char *bpf_object__get_name(struct bpf_object *obj);
-unsigned int bpf_object__get_kversion(struct bpf_object *obj);
+const char *bpf_object__name(struct bpf_object *obj);
+unsigned int bpf_object__kversion(struct bpf_object *obj);

 struct bpf_object *bpf_object__next(struct bpf_object *prev);
 #define bpf_object__for_each_safe(pos, tmp) \

@@ -78,11 +92,10 @@ struct bpf_program *bpf_program__next(struct bpf_program *prog,
 typedef void (*bpf_program_clear_priv_t)(struct bpf_program *,
 					 void *);

-int bpf_program__set_private(struct bpf_program *prog, void *priv,
-			     bpf_program_clear_priv_t clear_priv);
+int bpf_program__set_priv(struct bpf_program *prog, void *priv,
+			  bpf_program_clear_priv_t clear_priv);

-int bpf_program__get_private(struct bpf_program *prog,
-			     void **ppriv);
+void *bpf_program__priv(struct bpf_program *prog);

 const char *bpf_program__title(struct bpf_program *prog, bool needs_copy);

@@ -152,6 +165,15 @@ int bpf_program__set_prep(struct bpf_program *prog, int nr_instance,

 int bpf_program__nth_fd(struct bpf_program *prog, int n);

+/*
+ * Adjust type of bpf program. Default is kprobe.
+ */
+int bpf_program__set_tracepoint(struct bpf_program *prog);
+int bpf_program__set_kprobe(struct bpf_program *prog);
+
+bool bpf_program__is_tracepoint(struct bpf_program *prog);
+bool bpf_program__is_kprobe(struct bpf_program *prog);
+
 /*
  * We don't need __attribute__((packed)) now since it is
  * unnecessary for 'bpf_map_def' because they are all aligned.

@@ -171,7 +193,7 @@ struct bpf_map_def {
  */
 struct bpf_map;
 struct bpf_map *
-bpf_object__get_map_by_name(struct bpf_object *obj, const char *name);
+bpf_object__find_map_by_name(struct bpf_object *obj, const char *name);

 struct bpf_map *
 bpf_map__next(struct bpf_map *map, struct bpf_object *obj);

@@ -180,13 +202,13 @@ bpf_map__next(struct bpf_map *map, struct bpf_object *obj);
 	     (pos) != NULL;				\
 	     (pos) = bpf_map__next((pos), (obj)))

-int bpf_map__get_fd(struct bpf_map *map);
-int bpf_map__get_def(struct bpf_map *map, struct bpf_map_def *pdef);
-const char *bpf_map__get_name(struct bpf_map *map);
+int bpf_map__fd(struct bpf_map *map);
+const struct bpf_map_def *bpf_map__def(struct bpf_map *map);
+const char *bpf_map__name(struct bpf_map *map);

 typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *);
-int bpf_map__set_private(struct bpf_map *map, void *priv,
-			 bpf_map_clear_priv_t clear_priv);
-int bpf_map__get_private(struct bpf_map *map, void **ppriv);
+int bpf_map__set_priv(struct bpf_map *map, void *priv,
+		      bpf_map_clear_priv_t clear_priv);
+void *bpf_map__priv(struct bpf_map *map);

 #endif

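Taken together, these renames settle libbpf on terse accessors that return the value, or an ERR_PTR-encoded error, directly instead of filling an out-parameter. A sketch of a caller on the new interface; the object path and map name are invented for illustration, bpf_object__open() and the IS_ERR() helpers from tools' <linux/err.h> are assumed, and error handling is pared down to the minimum:

    #include <stdio.h>
    #include <linux/err.h>	/* tools/include: IS_ERR()/PTR_ERR() */
    #include "libbpf.h"		/* tools/lib/bpf */

    int load_and_report(void)
    {
            struct bpf_object *obj;
            struct bpf_program *prog;
            struct bpf_map *map;

            obj = bpf_object__open("prog.o");	/* hypothetical BPF object file */
            if (IS_ERR(obj))
                    return PTR_ERR(obj);

            /* mark every program as a tracepoint program before loading */
            for (prog = bpf_program__next(NULL, obj); prog;
                 prog = bpf_program__next(prog, obj))
                    bpf_program__set_tracepoint(prog);

            if (bpf_object__load(obj))
                    return -1;

            /* value-or-error accessors replace the old out-parameter getters */
            map = bpf_object__find_map_by_name(obj, "counts");	/* made-up map name */
            if (map)
                    printf("map '%s' has fd %d\n",
                           bpf_map__name(map), bpf_map__fd(map));

            bpf_object__close(obj);
            return 0;
    }
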
tools/lib/str_error_r.c (new file, 26 lines)

@@ -0,0 +1,26 @@
+#undef _GNU_SOURCE
+#include <string.h>
+#include <stdio.h>
+#include <linux/string.h>
+
+/*
+ * The tools so far have been using the strerror_r() GNU variant, that returns
+ * a string, be it the buffer passed or something else.
+ *
+ * But that, besides being tricky in cases where we expect that the function
+ * using strerror_r() returns the error formatted in a provided buffer (we have
+ * to check if it returned something else and copy that instead), breaks the
+ * build on systems not using glibc, like Alpine Linux, where musl libc is
+ * used.
+ *
+ * So, introduce yet another wrapper, str_error_r(), that has the GNU
+ * interface, but uses the portable XSI variant of strerror_r(), so that users
+ * rest asured that the provided buffer is used and it is what is returned.
+ */
+char *str_error_r(int errnum, char *buf, size_t buflen)
+{
+	int err = strerror_r(errnum, buf, buflen);
+	if (err)
+		snprintf(buf, buflen, "INTERNAL ERROR: strerror_r(%d, %p, %zd)=%d", errnum, buf, buflen, err);
+	return buf;
+}

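A short usage sketch: because the XSI variant always writes into the caller's buffer, the return value can be handed straight to printf-style functions, without the "did it return my buffer or a static string?" dance the comment above describes. The function name below is made up for illustration:

    #include <stdio.h>
    #include <linux/string.h>	/* tools/include wrapper declaring str_error_r() */

    void report_open_error(const char *path, int err)
    {
            char sbuf[128];

            /* sbuf is guaranteed to hold the message, on glibc and musl alike */
            fprintf(stderr, "cannot open %s: %s\n",
                    path, str_error_r(err, sbuf, sizeof(sbuf)));
    }
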
@@ -19,7 +19,13 @@ MAKEFLAGS += --no-print-directory
 LIBFILE = $(OUTPUT)libsubcmd.a

 CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
-CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
+CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
+
+# Treat warnings as errors unless directed not to
+ifneq ($(WERROR),0)
+  CFLAGS += -Werror
+endif

 CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE

 CFLAGS += -I$(srctree)/tools/include/

@@ -3,6 +3,7 @@
 #include <sys/stat.h>
 #include <fcntl.h>
 #include <string.h>
+#include <linux/string.h>
 #include <errno.h>
 #include <sys/wait.h>
 #include "subcmd-util.h"

@@ -109,7 +110,7 @@ int start_command(struct child_process *cmd)

 		if (cmd->dir && chdir(cmd->dir))
 			die("exec %s: cd to %s failed (%s)", cmd->argv[0],
-			    cmd->dir, strerror_r(errno, sbuf, sizeof(sbuf)));
+			    cmd->dir, str_error_r(errno, sbuf, sizeof(sbuf)));
 		if (cmd->env) {
 			for (; *cmd->env; cmd->env++) {
 				if (strchr(*cmd->env, '='))

@@ -173,7 +174,7 @@ static int wait_or_whine(pid_t pid)
 			if (errno == EINTR)
 				continue;
 			fprintf(stderr, " Error: waitpid failed (%s)",
-				str_error_r(errno, sbuf, sizeof(sbuf)));
+				str_error_r(errno, sbuf, sizeof(sbuf)));
 			return -ERR_RUN_COMMAND_WAITPID;
 		}
 		if (waiting != pid)

@@ -23,6 +23,7 @@
  * Frederic Weisbecker gave his permission to relicense the code to
  * the Lesser General Public License.
  */
+#include <inttypes.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>

@@ -31,8 +32,9 @@
 #include <errno.h>
 #include <stdint.h>
 #include <limits.h>
+#include <linux/string.h>

 #include <netinet/ip6.h>
 #include <netinet/in.h>
 #include "event-parse.h"
 #include "event-utils.h"

@@ -6131,12 +6133,7 @@ int pevent_strerror(struct pevent *pevent __maybe_unused,
 	const char *msg;

 	if (errnum >= 0) {
-		msg = strerror_r(errnum, buf, buflen);
-		if (msg != buf) {
-			size_t len = strlen(msg);
-			memcpy(buf, msg, min(buflen - 1, len));
-			*(buf + min(buflen - 1, len)) = '\0';
-		}
+		str_error_r(errnum, buf, buflen);
 		return 0;
 	}