Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes and clean-ups from Catalin Marinas:
 "Here's a second pull request for this merging window with some
  fixes/clean-ups:

   - __cmpxchg_double*() return type fix to avoid truncation of a long
     to int and subsequent logical "not" in cmpxchg_double()
     misinterpreting the operation success/failure

   - BPF fixes for mod and div by zero

   - Fix compilation with STRICT_MM_TYPECHECKS enabled

   - VDSO build fix without libgcov

   - Some static and __maybe_unused annotations

   - Kconfig clean-up (FRAME_POINTER)

   - defconfig update for CRYPTO_CRC32_ARM64"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: suspend: make hw_breakpoint_restore static
  arm64: mmu: make split_pud and fixup_executable static
  arm64: smp: make of_parse_and_init_cpus static
  arm64: use linux/types.h in kvm.h
  arm64: build vdso without libgcov
  arm64: mark cpus_have_hwcap as __maybe_unused
  arm64: remove redundant FRAME_POINTER kconfig option and force to select it
  arm64: fix R/O permissions of FDT mapping
  arm64: fix STRICT_MM_TYPECHECKS issue in PTE_CONT manipulation
  arm64: bpf: fix mod-by-zero case
  arm64: bpf: fix div-by-zero case
  arm64: Enable CRYPTO_CRC32_ARM64 in defconfig
  arm64: cmpxchg_dbl: fix return value type
commit a18e2fa5e6
14 changed files with 54 additions and 33 deletions
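Note (not part of the commit): the first item in the message above is easiest to see with a small userspace sketch. fake_cmpxchg_double() below is a hypothetical stand-in that only models the return convention of __cmpxchg_double*() — 0 means both words matched, nonzero means mismatch — to show how narrowing that long result to int lets the logical "not" in cmpxchg_double() report a failed operation as a success.

#include <stdio.h>

/* Hypothetical stand-in for __cmpxchg_double*(): 0 when both observed
 * words match the expected ones, nonzero on mismatch. */
static long fake_cmpxchg_double(unsigned long old1, unsigned long seen1,
                                unsigned long old2, unsigned long seen2)
{
        return (long)((old1 ^ seen1) | (old2 ^ seen2));
}

int main(void)
{
        /* Mismatch confined to bits above 31. */
        long raw = fake_cmpxchg_double(0x1UL << 40, 0x2UL << 40, 0, 0);
        int truncated = (int)raw;       /* what the old "int" return type did */

        printf("success (long result): %d\n", !raw);        /* 0: failure correctly reported */
        printf("success (int result):  %d\n", !truncated);  /* 1: failure silently lost */
        return 0;
}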
arch/arm64/Kconfig
@@ -27,6 +27,7 @@ config ARM64
 	select CPU_PM if (SUSPEND || CPU_IDLE)
 	select DCACHE_WORD_ACCESS
 	select EDAC_SUPPORT
+	select FRAME_POINTER
 	select GENERIC_ALLOCATOR
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CLOCKEVENTS_BROADCAST
arch/arm64/Kconfig.debug
@@ -2,10 +2,6 @@ menu "Kernel hacking"
 
 source "lib/Kconfig.debug"
 
-config FRAME_POINTER
-	bool
-	default y
-
 config ARM64_PTDUMP
 	bool "Export kernel pagetable layout to userspace via debugfs"
 	depends on DEBUG_KERNEL
arch/arm64/configs/defconfig
@@ -224,3 +224,4 @@ CONFIG_CRYPTO_GHASH_ARM64_CE=y
 CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
 CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
 CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+CONFIG_CRYPTO_CRC32_ARM64=y
arch/arm64/include/asm/atomic_ll_sc.h
@@ -233,7 +233,7 @@ __CMPXCHG_CASE( ,  ,  mb_8, dmb ish,  , l, "memory")
 #undef __CMPXCHG_CASE
 
 #define __CMPXCHG_DBL(name, mb, rel, cl)			\
-__LL_SC_INLINE int						\
+__LL_SC_INLINE long						\
 __LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1,	\
 				      unsigned long old2,	\
 				      unsigned long new1,	\
arch/arm64/include/asm/atomic_lse.h
@@ -387,7 +387,7 @@ __CMPXCHG_CASE(x,  ,  mb_8, al, "memory")
 #define __LL_SC_CMPXCHG_DBL(op)	__LL_SC_CALL(__cmpxchg_double##op)
 
 #define __CMPXCHG_DBL(name, mb, cl...)				\
-static inline int __cmpxchg_double##name(unsigned long old1,	\
+static inline long __cmpxchg_double##name(unsigned long old1,	\
 					 unsigned long old2,	\
 					 unsigned long new1,	\
 					 unsigned long new2,	\
arch/arm64/include/asm/pgtable.h
@@ -80,6 +80,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
 #define _PAGE_DEFAULT		(PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
 
 #define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
 
arch/arm64/include/uapi/asm/kvm.h
@@ -32,7 +32,7 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/psci.h>
-#include <asm/types.h>
+#include <linux/types.h>
 #include <asm/ptrace.h>
 
 #define __KVM_HAVE_GUEST_DEBUG
arch/arm64/kernel/cpufeature.c
@@ -696,7 +696,7 @@ static void cap_set_hwcap(const struct arm64_cpu_capabilities *cap)
 }
 
 /* Check if we have a particular HWCAP enabled */
-static bool cpus_have_hwcap(const struct arm64_cpu_capabilities *cap)
+static bool __maybe_unused cpus_have_hwcap(const struct arm64_cpu_capabilities *cap)
 {
 	bool rc;
 
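Note (not part of the commit): __maybe_unused is the kernel's wrapper around the compiler's "unused" attribute; it is needed above because the only caller of this static function is compiled out in some configurations, which would otherwise trigger -Wunused-function. A minimal sketch of the pattern with made-up names (CONFIG_WIDGET, have_widget), not the kernel code:

#define __maybe_unused __attribute__((unused))	/* as the kernel defines it */

static int __maybe_unused have_widget(void)
{
	return 0;
}

int probe(void)
{
#ifdef CONFIG_WIDGET
	return have_widget();	/* only reference to have_widget() */
#else
	return -1;		/* have_widget() unreferenced: attribute keeps gcc quiet */
#endif
}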
arch/arm64/kernel/smp.c
@@ -473,7 +473,7 @@ acpi_parse_gic_cpu_interface(struct acpi_subtable_header *header,
  * cpu logical map array containing MPIDR values related to logical
  * cpus. Assumes that cpu_logical_map(0) has already been initialized.
  */
-void __init of_parse_and_init_cpus(void)
+static void __init of_parse_and_init_cpus(void)
 {
 	struct device_node *dn = NULL;
 
arch/arm64/kernel/suspend.c
@@ -41,7 +41,7 @@ void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
  * time the notifier runs debug exceptions might have been enabled already,
  * with HW breakpoints registers content still in an unknown state.
  */
-void (*hw_breakpoint_restore)(void *);
+static void (*hw_breakpoint_restore)(void *);
 void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
 {
 	/* Prevent multiple restore hook initializations */
arch/arm64/kernel/vdso/Makefile
@@ -15,6 +15,9 @@ ccflags-y := -shared -fno-common -fno-builtin
 ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
 		$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
 
+# Disable gcov profiling for VDSO code
+GCOV_PROFILE := n
+
 # Workaround for bare-metal (ELF) toolchains that neglect to pass -shared
 # down to collect2, resulting in silent corruption of the vDSO image.
 ccflags-y += -Wl,-shared
arch/arm64/mm/mmu.c
@@ -146,7 +146,7 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 		if (((addr | next | phys) & ~CONT_MASK) == 0) {
 			/* a block of CONT_PTES */
 			__populate_init_pte(pte, addr, next, phys,
-					    prot | __pgprot(PTE_CONT));
+					    __pgprot(pgprot_val(prot) | PTE_CONT));
 		} else {
 			/*
 			 * If the range being split is already inside of a
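Note (not part of the commit): with STRICT_MM_TYPECHECKS enabled, pgprot_t is a one-member struct rather than a bare integer, so "prot | __pgprot(PTE_CONT)" is a type error; the value has to be unwrapped with pgprot_val() and rewrapped with __pgprot(), which is exactly what the hunk above does. A sketch with simplified stand-in definitions, not the real arm64 headers:

/* Simplified stand-ins for the STRICT_MM_TYPECHECKS definitions. */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })
#define PTE_CONT	(1UL << 52)	/* bit position as used on arm64 */

pgprot_t mark_contiguous(pgprot_t prot)
{
	/* "prot | __pgprot(PTE_CONT)" would not compile: no operator| on structs */
	return __pgprot(pgprot_val(prot) | PTE_CONT);
}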
arch/arm64/mm/mmu.c
@@ -165,7 +165,7 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
 	} while (addr != end);
 }
 
-void split_pud(pud_t *old_pud, pmd_t *pmd)
+static void split_pud(pud_t *old_pud, pmd_t *pmd)
 {
 	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
 	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
arch/arm64/mm/mmu.c
@@ -447,7 +447,7 @@ static void __init map_mem(void)
 	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 }
 
-void __init fixup_executable(void)
+static void __init fixup_executable(void)
 {
 #ifdef CONFIG_DEBUG_RODATA
 	/* now that we are actually fully mapped, make the start/end more fine grained */
arch/arm64/mm/mmu.c
@@ -691,7 +691,7 @@ void __set_fixmap(enum fixed_addresses idx,
 void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
 {
 	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
-	pgprot_t prot = PAGE_KERNEL | PTE_RDONLY;
+	pgprot_t prot = PAGE_KERNEL_RO;
 	int size, offset;
 	void *dt_virt;
 
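Note (not part of the commit): two things are wrong with the removed line, both visible from the pgtable.h hunk earlier. PAGE_KERNEL already carries PTE_WRITE, so OR-ing PTE_RDONLY on top leaves both bits set and the mapping is not reliably read-only, and under STRICT_MM_TYPECHECKS the pgprot_t | pteval_t expression does not even compile. The sketch below uses made-up bit values purely to illustrate the first point:

#include <stdio.h>

/* Illustrative bit values only; not the real arm64 PTE layout. */
#define PTE_WRITE	(1UL << 1)
#define PTE_RDONLY	(1UL << 2)
#define PAGE_KERNEL	(PTE_WRITE)	/* writable kernel mapping */
#define PAGE_KERNEL_RO	(PTE_RDONLY)	/* read-only kernel mapping */

int main(void)
{
	unsigned long before = PAGE_KERNEL | PTE_RDONLY;	/* the removed line's intent */
	unsigned long after  = PAGE_KERNEL_RO;			/* the fix */

	printf("before: PTE_WRITE still set? %d\n", !!(before & PTE_WRITE));	/* 1 */
	printf("after:  PTE_WRITE still set? %d\n", !!(after & PTE_WRITE));	/* 0 */
	return 0;
}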
arch/arm64/net/bpf_jit.h
@@ -1,7 +1,7 @@
 /*
  * BPF JIT compiler for ARM64
  *
- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
arch/arm64/net/bpf_jit.h
@@ -35,6 +35,7 @@
 	aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
 		AARCH64_INSN_BRANCH_COMP_##type)
 #define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
+#define A64_CBNZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, NONZERO)
 
 /* Conditional branch (immediate) */
 #define A64_COND_BRANCH(cond, offset)	\
arch/arm64/net/bpf_jit_comp.c
@@ -1,7 +1,7 @@
 /*
  * BPF JIT compiler for ARM64
  *
- * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
+ * Copyright (C) 2014-2015 Zi Shen Lim <zlim.lnx@gmail.com>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
arch/arm64/net/bpf_jit_comp.c
@@ -225,6 +225,17 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	u8 jmp_cond;
 	s32 jmp_offset;
 
+#define check_imm(bits, imm) do {				\
+	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
+	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
+		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
+			i, imm, imm);				\
+		return -EINVAL;					\
+	}							\
+} while (0)
+#define check_imm19(imm) check_imm(19, imm)
+#define check_imm26(imm) check_imm(26, imm)
+
 	switch (code) {
 	/* dst = src */
 	case BPF_ALU | BPF_MOV | BPF_X:
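Note (not part of the commit): this hunk only moves the existing check_imm() helpers earlier in build_insn() so the div/mod rewrite in the next hunk can use them. As a standalone restatement (a sketch, not the kernel's copy): A64 CBZ/CBNZ carry a signed 19-bit offset field and B a signed 26-bit one, and the test rejects immediates whose significant bits spill past that width.

#include <stdio.h>

/* Standalone restatement of the check_imm() test: a positive immediate may
 * not have bits at or above 'bits'; a negative one must sign-extend from there. */
static int imm_fits(int bits, int imm)
{
	return !(((imm > 0) && (imm >> bits)) ||
		 ((imm < 0) && (~imm >> bits)));
}

int main(void)
{
	printf("%d\n", imm_fits(19, 3));          /* 1: small forward jump fits */
	printf("%d\n", imm_fits(19, 1 << 20));    /* 0: too far for the narrow field */
	printf("%d\n", imm_fits(26, -(1 << 20))); /* 1: backward jump fits the wider field */
	return 0;
}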
arch/arm64/net/bpf_jit_comp.c
@@ -258,15 +269,33 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		break;
 	case BPF_ALU | BPF_DIV | BPF_X:
 	case BPF_ALU64 | BPF_DIV | BPF_X:
-		emit(A64_UDIV(is64, dst, dst, src), ctx);
-		break;
 	case BPF_ALU | BPF_MOD | BPF_X:
 	case BPF_ALU64 | BPF_MOD | BPF_X:
-		ctx->tmp_used = 1;
-		emit(A64_UDIV(is64, tmp, dst, src), ctx);
-		emit(A64_MUL(is64, tmp, tmp, src), ctx);
-		emit(A64_SUB(is64, dst, dst, tmp), ctx);
-		break;
+	{
+		const u8 r0 = bpf2a64[BPF_REG_0];
+
+		/* if (src == 0) return 0 */
+		jmp_offset = 3; /* skip ahead to else path */
+		check_imm19(jmp_offset);
+		emit(A64_CBNZ(is64, src, jmp_offset), ctx);
+		emit(A64_MOVZ(1, r0, 0, 0), ctx);
+		jmp_offset = epilogue_offset(ctx);
+		check_imm26(jmp_offset);
+		emit(A64_B(jmp_offset), ctx);
+		/* else */
+		switch (BPF_OP(code)) {
+		case BPF_DIV:
+			emit(A64_UDIV(is64, dst, dst, src), ctx);
+			break;
+		case BPF_MOD:
+			ctx->tmp_used = 1;
+			emit(A64_UDIV(is64, tmp, dst, src), ctx);
+			emit(A64_MUL(is64, tmp, tmp, src), ctx);
+			emit(A64_SUB(is64, dst, dst, tmp), ctx);
+			break;
+		}
+		break;
+	}
 	case BPF_ALU | BPF_LSH | BPF_X:
 	case BPF_ALU64 | BPF_LSH | BPF_X:
 		emit(A64_LSLV(is64, dst, dst, src), ctx);
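Note (not part of the commit): the eBPF interpreter of the time treated division or modulo by zero as "the program returns 0" rather than a fault, so the JIT now emits a CBNZ guard on src; when src is zero it zeroes R0 and branches to the epilogue, otherwise it falls through to UDIV (or the UDIV/MUL/SUB sequence for MOD). A rough C model of that control flow, under that assumption and not the JIT itself:

/* r0 plays the role of BPF_REG_0, the program's return value. */
struct state { unsigned long long r0; int exited; };

unsigned long long jit_div_mod(struct state *st, int is_mod,
			       unsigned long long dst,
			       unsigned long long src)
{
	if (src == 0) {			/* the A64_CBNZ(is64, src, ...) guard */
		st->r0 = 0;		/* A64_MOVZ(1, r0, 0, 0) */
		st->exited = 1;		/* A64_B(epilogue_offset) */
		return dst;		/* dst left untouched */
	}
	/* else path: same arithmetic as UDIV, or UDIV+MUL+SUB for MOD */
	return is_mod ? dst % src : dst / src;
}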
arch/arm64/net/bpf_jit_comp.c
@@ -393,17 +422,6 @@ emit_bswap_uxt:
 		emit(A64_ASR(is64, dst, dst, imm), ctx);
 		break;
 
-#define check_imm(bits, imm) do {				\
-	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
-	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
-		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
-			i, imm, imm);				\
-		return -EINVAL;					\
-	}							\
-} while (0)
-#define check_imm19(imm) check_imm(19, imm)
-#define check_imm26(imm) check_imm(26, imm)
-
 	/* JUMP off */
 	case BPF_JMP | BPF_JA:
 		jmp_offset = bpf2a64_offset(i + off, i, ctx);