Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Alexei Starovoitov says:

====================
pull-request: bpf 2019-07-18

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) verifier precision propagation fix, from Andrii.

2) BTF size fix for typedefs, from Andrii.

3) a bunch of big endian fixes, from Ilya.

4) wide load from bpf_sock_addr fixes, from Stanislav.

5) a bunch of misc fixes from a number of developers.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit bb74523167
32 changed files with 391 additions and 195 deletions
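For context on item 4 above: after this series, cgroup sock_addr programs may read user_ip6 and msg_src_ip6 with aligned 8-byte loads in addition to the narrower 1/2/4-byte accesses (see the uapi comment and filter.c hunks below). A minimal sketch of such a program follows; the section name, function name and header paths are illustrative assumptions, not taken from this commit.

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("cgroup/sendmsg6")
int allow_nonzero_dst(struct bpf_sock_addr *ctx)
{
	/* Aligned wide (8-byte) loads of the destination IPv6 address;
	 * user_ip6[0] and user_ip6[2] sit at u64-aligned offsets.
	 */
	__u64 hi = *(__u64 *)&ctx->user_ip6[0];
	__u64 lo = *(__u64 *)&ctx->user_ip6[2];

	/* Reject sendmsg() to the unspecified address [::], allow the rest. */
	return (hi | lo) ? 1 : 0;
}

char _license[] SEC("license") = "GPL";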
@@ -3108,9 +3108,9 @@ S: Maintained
 F: arch/riscv/net/
 
 BPF JIT for S390
+M: Ilya Leoshkevich <iii@linux.ibm.com>
 M: Heiko Carstens <heiko.carstens@de.ibm.com>
 M: Vasily Gorbik <gor@linux.ibm.com>
-M: Christian Borntraeger <borntraeger@de.ibm.com>
 L: netdev@vger.kernel.org
 L: bpf@vger.kernel.org
 S: Maintained
@@ -747,7 +747,7 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
 return size <= size_default && (size & (size - 1)) == 0;
 }
 
-#define bpf_ctx_wide_store_ok(off, size, type, field) \
+#define bpf_ctx_wide_access_ok(off, size, type, field) \
 (size == sizeof(__u64) && \
 off >= offsetof(type, field) && \
 off + sizeof(__u64) <= offsetofend(type, field) && \
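The rename above is mechanical, but it helps to see what the check accepts. Below is a standalone sketch; the stand-in struct mirrors only the bpf_sock_addr fields relevant here, and the macro's final condition (u64 alignment of the offset) is not visible in the quoted hunk and is filled in as an assumption, consistent with the verifier tests added at the end of this diff (off=12 size=8 rejected).

#include <stdio.h>
#include <stddef.h>
#include <linux/types.h>

struct sock_addr_like {			/* stand-in for struct bpf_sock_addr */
	__u32 user_family;
	__u32 user_ip4;
	__u32 user_ip6[4];
	__u32 user_port;
};

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

#define bpf_ctx_wide_access_ok(off, size, type, field) \
	(size == sizeof(__u64) && \
	 off >= offsetof(type, field) && \
	 off + sizeof(__u64) <= offsetofend(type, field) && \
	 off % sizeof(__u64) == 0)

int main(void)
{
	/* 8-byte access at user_ip6[0] (offset 8): accepted -> prints 1 */
	printf("%d\n", bpf_ctx_wide_access_ok(offsetof(struct sock_addr_like, user_ip6[0]),
					      8, struct sock_addr_like, user_ip6));
	/* 8-byte access at user_ip6[1] (offset 12): not u64-aligned -> prints 0 */
	printf("%d\n", bpf_ctx_wide_access_ok(offsetof(struct sock_addr_like, user_ip6[1]),
					      8, struct sock_addr_like, user_ip6));
	return 0;
}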
@@ -3248,7 +3248,7 @@ struct bpf_sock_addr {
 __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write.
 * Stored in network byte order.
 */
-__u32 user_ip6[4]; /* Allows 1,2,4-byte read and 4,8-byte write.
+__u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
 * Stored in network byte order.
 */
 __u32 user_port; /* Allows 4-byte read and write.
@@ -3260,7 +3260,7 @@ struct bpf_sock_addr {
 __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write.
 * Stored in network byte order.
 */
-__u32 msg_src_ip6[4]; /* Allows 1,2,4-byte read and 4,8-byte write.
+__u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
 * Stored in network byte order.
 */
 __bpf_md_ptr(struct bpf_sock *, sk);
@@ -1073,11 +1073,18 @@ const struct btf_type *btf_type_id_size(const struct btf *btf,
 !btf_type_is_var(size_type)))
 return NULL;
 
-size = btf->resolved_sizes[size_type_id];
 size_type_id = btf->resolved_ids[size_type_id];
 size_type = btf_type_by_id(btf, size_type_id);
 if (btf_type_nosize_or_null(size_type))
 return NULL;
+else if (btf_type_has_size(size_type))
+size = size_type->size;
+else if (btf_type_is_array(size_type))
+size = btf->resolved_sizes[size_type_id];
+else if (btf_type_is_ptr(size_type))
+size = sizeof(void *);
+else
+return NULL;
 }
 
 *type_id = size_type_id;
@@ -1602,7 +1609,6 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,
 const struct btf_type *next_type;
 u32 next_type_id = t->type;
 struct btf *btf = env->btf;
-u32 next_type_size = 0;
 
 next_type = btf_type_by_id(btf, next_type_id);
 if (!next_type || btf_type_is_resolve_source_only(next_type)) {
@@ -1620,7 +1626,7 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,
 * save us a few type-following when we use it later (e.g. in
 * pretty print).
 */
-if (!btf_type_id_size(btf, &next_type_id, &next_type_size)) {
+if (!btf_type_id_size(btf, &next_type_id, NULL)) {
 if (env_type_is_resolved(env, next_type_id))
 next_type = btf_type_id_resolve(btf, &next_type_id);
 
@@ -1633,7 +1639,7 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,
 }
 }
 
-env_stack_pop_resolved(env, next_type_id, next_type_size);
+env_stack_pop_resolved(env, next_type_id, 0);
 
 return 0;
 }
@@ -1645,7 +1651,6 @@ static int btf_var_resolve(struct btf_verifier_env *env,
 const struct btf_type *t = v->t;
 u32 next_type_id = t->type;
 struct btf *btf = env->btf;
-u32 next_type_size;
 
 next_type = btf_type_by_id(btf, next_type_id);
 if (!next_type || btf_type_is_resolve_source_only(next_type)) {
@@ -1675,12 +1680,12 @@ static int btf_var_resolve(struct btf_verifier_env *env,
 * forward types or similar that would resolve to size of
 * zero is allowed.
 */
-if (!btf_type_id_size(btf, &next_type_id, &next_type_size)) {
+if (!btf_type_id_size(btf, &next_type_id, NULL)) {
 btf_verifier_log_type(env, v->t, "Invalid type_id");
 return -EINVAL;
 }
 
-env_stack_pop_resolved(env, next_type_id, next_type_size);
+env_stack_pop_resolved(env, next_type_id, 0);
 
 return 0;
 }
@@ -1519,9 +1519,9 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
 return -EFAULT;
 }
 *stack_mask |= 1ull << spi;
-} else if (class == BPF_STX) {
+} else if (class == BPF_STX || class == BPF_ST) {
 if (*reg_mask & dreg)
-/* stx shouldn't be using _scalar_ dst_reg
+/* stx & st shouldn't be using _scalar_ dst_reg
 * to access memory. It means backtracking
 * encountered a case of pointer subtraction.
 */
@@ -1540,6 +1540,7 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
 if (!(*stack_mask & (1ull << spi)))
 return 0;
 *stack_mask &= ~(1ull << spi);
+if (class == BPF_STX)
 *reg_mask |= sreg;
 } else if (class == BPF_JMP || class == BPF_JMP32) {
 if (opcode == BPF_CALL) {
@@ -1569,10 +1570,6 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx,
 if (mode == BPF_IND || mode == BPF_ABS)
 /* to be analyzed */
 return -ENOTSUPP;
-} else if (class == BPF_ST) {
-if (*reg_mask & dreg)
-/* likely pointer subtraction */
-return -ENOTSUPP;
 }
 return 0;
 }
@@ -6106,11 +6103,13 @@ static int check_return_code(struct bpf_verifier_env *env)
 if (env->prog->expected_attach_type == BPF_CGROUP_UDP4_RECVMSG ||
 env->prog->expected_attach_type == BPF_CGROUP_UDP6_RECVMSG)
 range = tnum_range(1, 1);
+break;
 case BPF_PROG_TYPE_CGROUP_SKB:
 if (env->prog->expected_attach_type == BPF_CGROUP_INET_EGRESS) {
 range = tnum_range(0, 3);
 enforce_attach_type_range = tnum_range(2, 3);
 }
+break;
 case BPF_PROG_TYPE_CGROUP_SOCK:
 case BPF_PROG_TYPE_SOCK_OPS:
 case BPF_PROG_TYPE_CGROUP_DEVICE:
@@ -6884,18 +6884,28 @@ static bool sock_addr_is_valid_access(int off, int size,
 case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
 msg_src_ip6[3]):
-/* Only narrow read access allowed for now. */
 if (type == BPF_READ) {
 bpf_ctx_record_field_size(info, size_default);
-if (!bpf_ctx_narrow_access_ok(off, size, size_default))
-return false;
-} else {
-if (bpf_ctx_wide_store_ok(off, size,
+
+if (bpf_ctx_wide_access_ok(off, size,
 struct bpf_sock_addr,
 user_ip6))
 return true;
 
-if (bpf_ctx_wide_store_ok(off, size,
+if (bpf_ctx_wide_access_ok(off, size,
+struct bpf_sock_addr,
+msg_src_ip6))
+return true;
+
+if (!bpf_ctx_narrow_access_ok(off, size, size_default))
+return false;
+} else {
+if (bpf_ctx_wide_access_ok(off, size,
+struct bpf_sock_addr,
+user_ip6))
+return true;
+
+if (bpf_ctx_wide_access_ok(off, size,
 struct bpf_sock_addr,
 msg_src_ip6))
 return true;
@@ -87,21 +87,20 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
 struct netdev_bpf bpf;
 int err = 0;
 
+ASSERT_RTNL();
+
 force_zc = flags & XDP_ZEROCOPY;
 force_copy = flags & XDP_COPY;
 
 if (force_zc && force_copy)
 return -EINVAL;
 
-rtnl_lock();
-if (xdp_get_umem_from_qid(dev, queue_id)) {
-err = -EBUSY;
-goto out_rtnl_unlock;
-}
+if (xdp_get_umem_from_qid(dev, queue_id))
+return -EBUSY;
 
 err = xdp_reg_umem_at_qid(dev, umem, queue_id);
 if (err)
-goto out_rtnl_unlock;
+return err;
 
 umem->dev = dev;
 umem->queue_id = queue_id;
@@ -110,7 +109,7 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
 
 if (force_copy)
 /* For copy-mode, we are done. */
-goto out_rtnl_unlock;
+return 0;
 
 if (!dev->netdev_ops->ndo_bpf ||
 !dev->netdev_ops->ndo_xsk_async_xmit) {
@@ -125,7 +124,6 @@ int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
 err = dev->netdev_ops->ndo_bpf(dev, &bpf);
 if (err)
 goto err_unreg_umem;
-rtnl_unlock();
 
 umem->zc = true;
 return 0;
@@ -135,8 +133,6 @@ err_unreg_umem:
 err = 0; /* fallback to copy mode */
 if (err)
 xdp_clear_umem_at_qid(dev, queue_id);
-out_rtnl_unlock:
-rtnl_unlock();
 return err;
 }
 
@@ -240,6 +240,9 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
 
 mutex_lock(&xs->mutex);
 
+if (xs->queue_id >= xs->dev->real_num_tx_queues)
+goto out;
+
 while (xskq_peek_desc(xs->tx, &desc)) {
 char *buffer;
 u64 addr;
@@ -250,12 +253,6 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
 goto out;
 }
 
-if (xskq_reserve_addr(xs->umem->cq))
-goto out;
-
-if (xs->queue_id >= xs->dev->real_num_tx_queues)
-goto out;
-
 len = desc.len;
 skb = sock_alloc_send_skb(sk, len, 1, &err);
 if (unlikely(!skb)) {
@@ -267,7 +264,7 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
 addr = desc.addr;
 buffer = xdp_umem_get_data(xs->umem, addr);
 err = skb_store_bits(skb, 0, buffer, len);
-if (unlikely(err)) {
+if (unlikely(err) || xskq_reserve_addr(xs->umem->cq)) {
 kfree_skb(skb);
 goto out;
 }
@@ -433,6 +430,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY))
 return -EINVAL;
 
+rtnl_lock();
 mutex_lock(&xs->mutex);
 if (xs->state != XSK_READY) {
 err = -EBUSY;
@@ -518,6 +516,7 @@ out_unlock:
 xs->state = XSK_BOUND;
 out_release:
 mutex_unlock(&xs->mutex);
+rtnl_unlock();
 return err;
 }
 
@@ -284,7 +284,7 @@ $(obj)/%.o: $(src)/%.c
 $(Q)$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \
 -I$(srctree)/tools/testing/selftests/bpf/ \
 -D__KERNEL__ -D__BPF_TRACING__ -Wno-unused-value -Wno-pointer-sign \
--D__TARGET_ARCH_$(ARCH) -Wno-compare-distinct-pointer-types \
+-D__TARGET_ARCH_$(SRCARCH) -Wno-compare-distinct-pointer-types \
 -Wno-gnu-variable-sized-type-not-at-end \
 -Wno-address-of-packed-member -Wno-tautological-compare \
 -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
@@ -74,6 +74,7 @@ static const char * const prog_type_name[] = {
 [BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport",
 [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
 [BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl",
+[BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
 [BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt",
 };
 
@@ -3245,7 +3245,7 @@ struct bpf_sock_addr {
 __u32 user_ip4; /* Allows 1,2,4-byte read and 4-byte write.
 * Stored in network byte order.
 */
-__u32 user_ip6[4]; /* Allows 1,2,4-byte read and 4,8-byte write.
+__u32 user_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
 * Stored in network byte order.
 */
 __u32 user_port; /* Allows 4-byte read and write.
@@ -3257,7 +3257,7 @@ struct bpf_sock_addr {
 __u32 msg_src_ip4; /* Allows 1,2,4-byte read and 4-byte write.
 * Stored in network byte order.
 */
-__u32 msg_src_ip6[4]; /* Allows 1,2,4-byte read and 4,8-byte write.
+__u32 msg_src_ip6[4]; /* Allows 1,2,4,8-byte read and 4,8-byte write.
 * Stored in network byte order.
 */
 __bpf_md_ptr(struct bpf_sock *, sk);
@@ -4126,7 +4126,7 @@ static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
 }
 attr.size = sizeof(attr);
 attr.type = type;
-attr.config1 = (uint64_t)(void *)name; /* kprobe_func or uprobe_path */
+attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
 attr.config2 = offset; /* kprobe_addr or probe_offset */
 
 /* pid filter is meaningful only for uprobes */
@@ -517,7 +517,8 @@ int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
 err = -errno;
 goto out_socket;
 }
-strncpy(xsk->ifname, ifname, IFNAMSIZ);
+strncpy(xsk->ifname, ifname, IFNAMSIZ - 1);
+xsk->ifname[IFNAMSIZ - 1] = '\0';
 
 err = xsk_set_xdp_socket_config(&xsk->config, usr_config);
 if (err)
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
+include ../../../../scripts/Kbuild.include
+include ../../../scripts/Makefile.arch
 
 LIBDIR := ../../../lib
 BPFDIR := $(LIBDIR)/bpf
@@ -81,13 +83,14 @@ all: $(TEST_CUSTOM_PROGS)
 $(OUTPUT)/urandom_read: $(OUTPUT)/%: %.c
 $(CC) -o $@ $< -Wl,--build-id
 
-$(OUTPUT)/test_maps: map_tests/*.c
+$(OUTPUT)/test_stub.o: test_stub.c
+$(CC) $(TEST_PROGS_CFLAGS) $(CFLAGS) -c -o $@ $<
 
 BPFOBJ := $(OUTPUT)/libbpf.a
 
-$(TEST_GEN_PROGS): test_stub.o $(BPFOBJ)
+$(TEST_GEN_PROGS): $(OUTPUT)/test_stub.o $(BPFOBJ)
 
-$(TEST_GEN_PROGS_EXTENDED): test_stub.o $(OUTPUT)/libbpf.a
+$(TEST_GEN_PROGS_EXTENDED): $(OUTPUT)/test_stub.o $(OUTPUT)/libbpf.a
 
 $(OUTPUT)/test_dev_cgroup: cgroup_helpers.c
 $(OUTPUT)/test_skb_cgroup_id_user: cgroup_helpers.c
@@ -138,7 +141,8 @@ CLANG_SYS_INCLUDES := $(shell $(CLANG) -v -E - </dev/null 2>&1 \
 
 CLANG_FLAGS = -I. -I./include/uapi -I../../../include/uapi \
 $(CLANG_SYS_INCLUDES) \
--Wno-compare-distinct-pointer-types
+-Wno-compare-distinct-pointer-types \
+-D__TARGET_ARCH_$(SRCARCH)
 
 $(OUTPUT)/test_l4lb_noinline.o: CLANG_FLAGS += -fno-inline
 $(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline
@@ -172,6 +176,7 @@ endif
 endif
 
 TEST_PROGS_CFLAGS := -I. -I$(OUTPUT)
+TEST_MAPS_CFLAGS := -I. -I$(OUTPUT)
 TEST_VERIFIER_CFLAGS := -I. -I$(OUTPUT) -Iverifier
 
 ifneq ($(SUBREG_CODEGEN),)
@@ -180,12 +185,12 @@ TEST_CUSTOM_PROGS += $(ALU32_BUILD_DIR)/test_progs_32
 $(ALU32_BUILD_DIR):
 mkdir -p $@
 
-$(ALU32_BUILD_DIR)/urandom_read: $(OUTPUT)/urandom_read
+$(ALU32_BUILD_DIR)/urandom_read: $(OUTPUT)/urandom_read | $(ALU32_BUILD_DIR)
 cp $< $@
 
 $(ALU32_BUILD_DIR)/test_progs_32: test_progs.c $(OUTPUT)/libbpf.a\
-$(ALU32_BUILD_DIR) \
-$(ALU32_BUILD_DIR)/urandom_read
+$(ALU32_BUILD_DIR)/urandom_read \
+| $(ALU32_BUILD_DIR)
 $(CC) $(TEST_PROGS_CFLAGS) $(CFLAGS) \
 -o $(ALU32_BUILD_DIR)/test_progs_32 \
 test_progs.c test_stub.c trace_helpers.c prog_tests/*.c \
@@ -194,10 +199,10 @@ $(ALU32_BUILD_DIR)/test_progs_32: test_progs.c $(OUTPUT)/libbpf.a\
 $(ALU32_BUILD_DIR)/test_progs_32: $(PROG_TESTS_H)
 $(ALU32_BUILD_DIR)/test_progs_32: prog_tests/*.c
 
-$(ALU32_BUILD_DIR)/%.o: progs/%.c $(ALU32_BUILD_DIR) \
-$(ALU32_BUILD_DIR)/test_progs_32
-$(CLANG) $(CLANG_FLAGS) \
--O2 -target bpf -emit-llvm -c $< -o - | \
+$(ALU32_BUILD_DIR)/%.o: progs/%.c $(ALU32_BUILD_DIR)/test_progs_32 \
+| $(ALU32_BUILD_DIR)
+($(CLANG) $(CLANG_FLAGS) -O2 -target bpf -emit-llvm -c $< -o - || \
+echo "clang failed") | \
 $(LLC) -march=bpf -mattr=+alu32 -mcpu=$(CPU) $(LLC_FLAGS) \
 -filetype=obj -o $@
 ifeq ($(DWARF2BTF),y)
@@ -208,32 +213,30 @@ endif
 # Have one program compiled without "-target bpf" to test whether libbpf loads
 # it successfully
 $(OUTPUT)/test_xdp.o: progs/test_xdp.c
-$(CLANG) $(CLANG_FLAGS) \
--O2 -emit-llvm -c $< -o - | \
+($(CLANG) $(CLANG_FLAGS) -O2 -emit-llvm -c $< -o - || \
+echo "clang failed") | \
 $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
 ifeq ($(DWARF2BTF),y)
 $(BTF_PAHOLE) -J $@
 endif
 
 $(OUTPUT)/%.o: progs/%.c
-$(CLANG) $(CLANG_FLAGS) \
--O2 -target bpf -emit-llvm -c $< -o - | \
+($(CLANG) $(CLANG_FLAGS) -O2 -target bpf -emit-llvm -c $< -o - || \
+echo "clang failed") | \
 $(LLC) -march=bpf -mcpu=$(CPU) $(LLC_FLAGS) -filetype=obj -o $@
 ifeq ($(DWARF2BTF),y)
 $(BTF_PAHOLE) -J $@
 endif
 
-PROG_TESTS_H := $(OUTPUT)/prog_tests/tests.h
-test_progs.c: $(PROG_TESTS_H)
-$(OUTPUT)/test_progs: CFLAGS += $(TEST_PROGS_CFLAGS)
-$(OUTPUT)/test_progs: prog_tests/*.c
-
 PROG_TESTS_DIR = $(OUTPUT)/prog_tests
 $(PROG_TESTS_DIR):
 mkdir -p $@
+PROG_TESTS_H := $(PROG_TESTS_DIR)/tests.h
 PROG_TESTS_FILES := $(wildcard prog_tests/*.c)
-$(PROG_TESTS_H): $(PROG_TESTS_DIR) $(PROG_TESTS_FILES)
+test_progs.c: $(PROG_TESTS_H)
+$(OUTPUT)/test_progs: CFLAGS += $(TEST_PROGS_CFLAGS)
+$(OUTPUT)/test_progs: test_progs.c $(PROG_TESTS_H) $(PROG_TESTS_FILES)
+$(PROG_TESTS_H): $(PROG_TESTS_FILES) | $(PROG_TESTS_DIR)
 $(shell ( cd prog_tests/; \
 echo '/* Generated header, do not edit */'; \
 echo '#ifdef DECLARE'; \
@@ -246,15 +249,15 @@ $(PROG_TESTS_H): $(PROG_TESTS_DIR) $(PROG_TESTS_FILES)
 echo '#endif' \
 ) > $(PROG_TESTS_H))
 
-TEST_MAPS_CFLAGS := -I. -I$(OUTPUT)
 MAP_TESTS_DIR = $(OUTPUT)/map_tests
 $(MAP_TESTS_DIR):
 mkdir -p $@
 MAP_TESTS_H := $(MAP_TESTS_DIR)/tests.h
+MAP_TESTS_FILES := $(wildcard map_tests/*.c)
 test_maps.c: $(MAP_TESTS_H)
 $(OUTPUT)/test_maps: CFLAGS += $(TEST_MAPS_CFLAGS)
-MAP_TESTS_FILES := $(wildcard map_tests/*.c)
-$(MAP_TESTS_H): $(MAP_TESTS_DIR) $(MAP_TESTS_FILES)
+$(OUTPUT)/test_maps: test_maps.c $(MAP_TESTS_H) $(MAP_TESTS_FILES)
+$(MAP_TESTS_H): $(MAP_TESTS_FILES) | $(MAP_TESTS_DIR)
 $(shell ( cd map_tests/; \
 echo '/* Generated header, do not edit */'; \
 echo '#ifdef DECLARE'; \
@@ -267,16 +270,15 @@ $(MAP_TESTS_H): $(MAP_TESTS_DIR) $(MAP_TESTS_FILES)
 echo '#endif' \
 ) > $(MAP_TESTS_H))
 
-VERIFIER_TESTS_H := $(OUTPUT)/verifier/tests.h
-test_verifier.c: $(VERIFIER_TESTS_H)
-$(OUTPUT)/test_verifier: CFLAGS += $(TEST_VERIFIER_CFLAGS)
-
 VERIFIER_TESTS_DIR = $(OUTPUT)/verifier
 $(VERIFIER_TESTS_DIR):
 mkdir -p $@
+VERIFIER_TESTS_H := $(VERIFIER_TESTS_DIR)/tests.h
 VERIFIER_TEST_FILES := $(wildcard verifier/*.c)
-$(OUTPUT)/verifier/tests.h: $(VERIFIER_TESTS_DIR) $(VERIFIER_TEST_FILES)
+test_verifier.c: $(VERIFIER_TESTS_H)
+$(OUTPUT)/test_verifier: CFLAGS += $(TEST_VERIFIER_CFLAGS)
+$(OUTPUT)/test_verifier: test_verifier.c $(VERIFIER_TESTS_H)
+$(VERIFIER_TESTS_H): $(VERIFIER_TEST_FILES) | $(VERIFIER_TESTS_DIR)
 $(shell ( cd verifier/; \
 echo '/* Generated header, do not edit */'; \
 echo '#ifdef FILL_ARRAY'; \
@@ -315,8 +315,8 @@ static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode,
 #if defined(__TARGET_ARCH_x86)
 #define bpf_target_x86
 #define bpf_target_defined
-#elif defined(__TARGET_ARCH_s930x)
-#define bpf_target_s930x
+#elif defined(__TARGET_ARCH_s390)
+#define bpf_target_s390
 #define bpf_target_defined
 #elif defined(__TARGET_ARCH_arm)
 #define bpf_target_arm
@@ -341,8 +341,8 @@ static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode,
 #ifndef bpf_target_defined
 #if defined(__x86_64__)
 #define bpf_target_x86
-#elif defined(__s390x__)
-#define bpf_target_s930x
+#elif defined(__s390__)
+#define bpf_target_s390
 #elif defined(__arm__)
 #define bpf_target_arm
 #elif defined(__aarch64__)
@@ -358,6 +358,7 @@ static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode,
 
 #if defined(bpf_target_x86)
 
+#ifdef __KERNEL__
 #define PT_REGS_PARM1(x) ((x)->di)
 #define PT_REGS_PARM2(x) ((x)->si)
 #define PT_REGS_PARM3(x) ((x)->dx)
@@ -368,19 +369,49 @@ static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode,
 #define PT_REGS_RC(x) ((x)->ax)
 #define PT_REGS_SP(x) ((x)->sp)
 #define PT_REGS_IP(x) ((x)->ip)
+#else
+#ifdef __i386__
+/* i386 kernel is built with -mregparm=3 */
+#define PT_REGS_PARM1(x) ((x)->eax)
+#define PT_REGS_PARM2(x) ((x)->edx)
+#define PT_REGS_PARM3(x) ((x)->ecx)
+#define PT_REGS_PARM4(x) 0
+#define PT_REGS_PARM5(x) 0
+#define PT_REGS_RET(x) ((x)->esp)
+#define PT_REGS_FP(x) ((x)->ebp)
+#define PT_REGS_RC(x) ((x)->eax)
+#define PT_REGS_SP(x) ((x)->esp)
+#define PT_REGS_IP(x) ((x)->eip)
+#else
+#define PT_REGS_PARM1(x) ((x)->rdi)
+#define PT_REGS_PARM2(x) ((x)->rsi)
+#define PT_REGS_PARM3(x) ((x)->rdx)
+#define PT_REGS_PARM4(x) ((x)->rcx)
+#define PT_REGS_PARM5(x) ((x)->r8)
+#define PT_REGS_RET(x) ((x)->rsp)
+#define PT_REGS_FP(x) ((x)->rbp)
+#define PT_REGS_RC(x) ((x)->rax)
+#define PT_REGS_SP(x) ((x)->rsp)
+#define PT_REGS_IP(x) ((x)->rip)
+#endif
+#endif
 
-#elif defined(bpf_target_s390x)
+#elif defined(bpf_target_s390)
 
-#define PT_REGS_PARM1(x) ((x)->gprs[2])
-#define PT_REGS_PARM2(x) ((x)->gprs[3])
-#define PT_REGS_PARM3(x) ((x)->gprs[4])
-#define PT_REGS_PARM4(x) ((x)->gprs[5])
-#define PT_REGS_PARM5(x) ((x)->gprs[6])
-#define PT_REGS_RET(x) ((x)->gprs[14])
-#define PT_REGS_FP(x) ((x)->gprs[11]) /* Works only with CONFIG_FRAME_POINTER */
-#define PT_REGS_RC(x) ((x)->gprs[2])
-#define PT_REGS_SP(x) ((x)->gprs[15])
-#define PT_REGS_IP(x) ((x)->psw.addr)
+/* s390 provides user_pt_regs instead of struct pt_regs to userspace */
+struct pt_regs;
+#define PT_REGS_S390 const volatile user_pt_regs
+#define PT_REGS_PARM1(x) (((PT_REGS_S390 *)(x))->gprs[2])
+#define PT_REGS_PARM2(x) (((PT_REGS_S390 *)(x))->gprs[3])
+#define PT_REGS_PARM3(x) (((PT_REGS_S390 *)(x))->gprs[4])
+#define PT_REGS_PARM4(x) (((PT_REGS_S390 *)(x))->gprs[5])
+#define PT_REGS_PARM5(x) (((PT_REGS_S390 *)(x))->gprs[6])
+#define PT_REGS_RET(x) (((PT_REGS_S390 *)(x))->gprs[14])
+/* Works only with CONFIG_FRAME_POINTER */
+#define PT_REGS_FP(x) (((PT_REGS_S390 *)(x))->gprs[11])
+#define PT_REGS_RC(x) (((PT_REGS_S390 *)(x))->gprs[2])
+#define PT_REGS_SP(x) (((PT_REGS_S390 *)(x))->gprs[15])
+#define PT_REGS_IP(x) (((PT_REGS_S390 *)(x))->psw.addr)
 
 #elif defined(bpf_target_arm)
 
@@ -397,16 +428,20 @@ static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode,
 
 #elif defined(bpf_target_arm64)
 
-#define PT_REGS_PARM1(x) ((x)->regs[0])
-#define PT_REGS_PARM2(x) ((x)->regs[1])
-#define PT_REGS_PARM3(x) ((x)->regs[2])
-#define PT_REGS_PARM4(x) ((x)->regs[3])
-#define PT_REGS_PARM5(x) ((x)->regs[4])
-#define PT_REGS_RET(x) ((x)->regs[30])
-#define PT_REGS_FP(x) ((x)->regs[29]) /* Works only with CONFIG_FRAME_POINTER */
-#define PT_REGS_RC(x) ((x)->regs[0])
-#define PT_REGS_SP(x) ((x)->sp)
-#define PT_REGS_IP(x) ((x)->pc)
+/* arm64 provides struct user_pt_regs instead of struct pt_regs to userspace */
+struct pt_regs;
+#define PT_REGS_ARM64 const volatile struct user_pt_regs
+#define PT_REGS_PARM1(x) (((PT_REGS_ARM64 *)(x))->regs[0])
+#define PT_REGS_PARM2(x) (((PT_REGS_ARM64 *)(x))->regs[1])
+#define PT_REGS_PARM3(x) (((PT_REGS_ARM64 *)(x))->regs[2])
+#define PT_REGS_PARM4(x) (((PT_REGS_ARM64 *)(x))->regs[3])
+#define PT_REGS_PARM5(x) (((PT_REGS_ARM64 *)(x))->regs[4])
+#define PT_REGS_RET(x) (((PT_REGS_ARM64 *)(x))->regs[30])
+/* Works only with CONFIG_FRAME_POINTER */
+#define PT_REGS_FP(x) (((PT_REGS_ARM64 *)(x))->regs[29])
+#define PT_REGS_RC(x) (((PT_REGS_ARM64 *)(x))->regs[0])
+#define PT_REGS_SP(x) (((PT_REGS_ARM64 *)(x))->sp)
+#define PT_REGS_IP(x) (((PT_REGS_ARM64 *)(x))->pc)
 
 #elif defined(bpf_target_mips)
 
@@ -452,10 +487,10 @@ static int (*bpf_skb_adjust_room)(void *ctx, __s32 len_diff, __u32 mode,
 
 #endif
 
-#ifdef bpf_target_powerpc
+#if defined(bpf_target_powerpc)
 #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = (ctx)->link; })
 #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
-#elif bpf_target_sparc
+#elif defined(bpf_target_sparc)
 #define BPF_KPROBE_READ_RET_IP(ip, ctx) ({ (ip) = PT_REGS_RET(ctx); })
 #define BPF_KRETPROBE_READ_RET_IP BPF_KPROBE_READ_RET_IP
 #else
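A short usage sketch for the accessors above; the attach points and program names are illustrative assumptions, but the bodies go through PT_REGS_* only, which is what lets the same source build for x86, s390 and arm64 after this change.

#include <linux/ptrace.h>
#include "bpf_helpers.h"

SEC("kprobe/sys_nanosleep")
int kprobe__nanosleep(struct pt_regs *ctx)
{
	/* first argument, fetched without naming any architecture register */
	volatile long arg1 = PT_REGS_PARM1(ctx);

	(void)arg1;
	return 0;
}

SEC("kretprobe/sys_nanosleep")
int kretprobe__nanosleep(struct pt_regs *ctx)
{
	/* return value, likewise arch-independent */
	volatile long ret = PT_REGS_RC(ctx);

	(void)ret;
	return 0;
}

char _license[] SEC("license") = "GPL";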
@@ -21,12 +21,6 @@ ssize_t get_base_addr() {
 return -EINVAL;
 }
 
-#ifdef __x86_64__
-#define SYS_KPROBE_NAME "__x64_sys_nanosleep"
-#else
-#define SYS_KPROBE_NAME "sys_nanosleep"
-#endif
-
 void test_attach_probe(void)
 {
 const char *kprobe_name = "kprobe/sys_nanosleep";
@@ -84,7 +78,7 @@ void test_attach_probe(void)
 
 kprobe_link = bpf_program__attach_kprobe(kprobe_prog,
 false /* retprobe */,
-SYS_KPROBE_NAME);
+SYS_NANOSLEEP_KPROBE_NAME);
 if (CHECK(IS_ERR(kprobe_link), "attach_kprobe",
 "err %ld\n", PTR_ERR(kprobe_link))) {
 kprobe_link = NULL;
@@ -92,7 +86,7 @@ void test_attach_probe(void)
 }
 kretprobe_link = bpf_program__attach_kprobe(kretprobe_prog,
 true /* retprobe */,
-SYS_KPROBE_NAME);
+SYS_NANOSLEEP_KPROBE_NAME);
 if (CHECK(IS_ERR(kretprobe_link), "attach_kretprobe",
 "err %ld\n", PTR_ERR(kretprobe_link))) {
 kretprobe_link = NULL;
@@ -5,12 +5,6 @@
 #include <sys/socket.h>
 #include <test_progs.h>
 
-#ifdef __x86_64__
-#define SYS_KPROBE_NAME "__x64_sys_nanosleep"
-#else
-#define SYS_KPROBE_NAME "sys_nanosleep"
-#endif
-
 static void on_sample(void *ctx, int cpu, void *data, __u32 size)
 {
 int cpu_data = *(int *)data, duration = 0;
@@ -56,7 +50,7 @@ void test_perf_buffer(void)
 
 /* attach kprobe */
 link = bpf_program__attach_kprobe(prog, false /* retprobe */,
-SYS_KPROBE_NAME);
+SYS_NANOSLEEP_KPROBE_NAME);
 if (CHECK(IS_ERR(link), "attach_kprobe", "err %ld\n", PTR_ERR(link)))
 goto out_close;
 
@@ -173,6 +173,18 @@ static int test_send_signal_tracepoint(void)
 return test_send_signal_common(&attr, BPF_PROG_TYPE_TRACEPOINT, "tracepoint");
 }
 
+static int test_send_signal_perf(void)
+{
+struct perf_event_attr attr = {
+.sample_period = 1,
+.type = PERF_TYPE_SOFTWARE,
+.config = PERF_COUNT_SW_CPU_CLOCK,
+};
+
+return test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT,
+"perf_sw_event");
+}
+
 static int test_send_signal_nmi(void)
 {
 struct perf_event_attr attr = {
@@ -181,8 +193,26 @@ static int test_send_signal_nmi(void)
 .type = PERF_TYPE_HARDWARE,
 .config = PERF_COUNT_HW_CPU_CYCLES,
 };
+int pmu_fd;
 
-return test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT, "perf_event");
+/* Some setups (e.g. virtual machines) might run with hardware
+* perf events disabled. If this is the case, skip this test.
+*/
+pmu_fd = syscall(__NR_perf_event_open, &attr, 0 /* pid */,
+-1 /* cpu */, -1 /* group_fd */, 0 /* flags */);
+if (pmu_fd == -1) {
+if (errno == ENOENT) {
+printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n",
+__func__);
+return 0;
+}
+/* Let the test fail with a more informative message */
+} else {
+close(pmu_fd);
+}
+
+return test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT,
+"perf_hw_event");
 }
 
 void test_send_signal(void)
@@ -190,6 +220,7 @@ void test_send_signal(void)
 int ret = 0;
 
 ret |= test_send_signal_tracepoint();
+ret |= test_send_signal_perf();
 ret |= test_send_signal_nmi();
 if (!ret)
 printf("test_send_signal:OK\n");
@@ -18,7 +18,7 @@ int nested_loops(volatile struct pt_regs* ctx)
 for (j = 0; j < 300; j++)
 for (i = 0; i < j; i++) {
 if (j & 1)
-m = ctx->rax;
+m = PT_REGS_RC(ctx);
 else
 m = j;
 sum += i * m;
@@ -16,7 +16,7 @@ int while_true(volatile struct pt_regs* ctx)
 int i = 0;
 
 while (true) {
-if (ctx->rax & 1)
+if (PT_REGS_RC(ctx) & 1)
 i += 3;
 else
 i += 7;
@@ -16,7 +16,7 @@ int while_true(volatile struct pt_regs* ctx)
 __u64 i = 0, sum = 0;
 do {
 i++;
-sum += ctx->rax;
+sum += PT_REGS_RC(ctx);
 } while (i < 0x100000000ULL);
 return sum;
 }
@@ -47,11 +47,12 @@ struct {
 * issue and avoid complicated C programming massaging.
 * This is an acceptable workaround since there is one entry here.
 */
+typedef __u64 raw_stack_trace_t[2 * MAX_STACK_RAWTP];
 struct {
 __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
 __uint(max_entries, 1);
 __type(key, __u32);
-__u64 (*value)[2 * MAX_STACK_RAWTP];
+__type(value, raw_stack_trace_t);
 } rawdata_map SEC(".maps");
 
 SEC("tracepoint/raw_syscalls/sys_enter")
@@ -36,8 +36,7 @@ struct {
 __uint(type, BPF_MAP_TYPE_ARRAY);
 __uint(max_entries, 128);
 __type(key, __u32);
-/* there seems to be a bug in kernel not handling typedef properly */
-struct bpf_stack_build_id (*value)[PERF_MAX_STACK_DEPTH];
+__type(value, stack_trace_t);
 } stack_amap SEC(".maps");
 
 /* taken from /sys/kernel/debug/tracing/events/random/urandom_read/format */
@@ -35,7 +35,7 @@ struct {
 __uint(type, BPF_MAP_TYPE_ARRAY);
 __uint(max_entries, 16384);
 __type(key, __u32);
-__u64 (*value)[PERF_MAX_STACK_DEPTH];
+__type(value, stack_trace_t);
 } stack_amap SEC(".maps");
 
 /* taken from /sys/kernel/debug/tracing/events/sched/sched_switch/format */
@@ -14,6 +14,7 @@
 #include <linux/tcp.h>
 #include <linux/udp.h>
 #include "bpf_helpers.h"
+#include "bpf_endian.h"
 
 static __u32 rol32(__u32 word, unsigned int shift)
 {
@@ -305,7 +306,7 @@ bool encap_v6(struct xdp_md *xdp, struct ctl_value *cval,
 ip6h->nexthdr = IPPROTO_IPV6;
 ip_suffix = pckt->flow.srcv6[3] ^ pckt->flow.port16[0];
 ip6h->payload_len =
-__builtin_bswap16(pkt_bytes + sizeof(struct ipv6hdr));
+bpf_htons(pkt_bytes + sizeof(struct ipv6hdr));
 ip6h->hop_limit = 4;
 
 ip6h->saddr.in6_u.u6_addr32[0] = 1;
@@ -322,7 +323,7 @@ bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
 struct real_definition *dst, __u32 pkt_bytes)
 {
 
-__u32 ip_suffix = __builtin_bswap16(pckt->flow.port16[0]);
+__u32 ip_suffix = bpf_ntohs(pckt->flow.port16[0]);
 struct eth_hdr *new_eth;
 struct eth_hdr *old_eth;
 __u16 *next_iph_u16;
@@ -352,7 +353,7 @@ bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval,
 iph->protocol = IPPROTO_IPIP;
 iph->check = 0;
 iph->tos = 1;
-iph->tot_len = __builtin_bswap16(pkt_bytes + sizeof(struct iphdr));
+iph->tot_len = bpf_htons(pkt_bytes + sizeof(struct iphdr));
 /* don't update iph->daddr, since it will overwrite old eth_proto
 * and multiple iterations of bpf_prog_run() will fail
 */
@@ -639,7 +640,7 @@ static int process_l3_headers_v6(struct packet_description *pckt,
 iph_len = sizeof(struct ipv6hdr);
 *protocol = ip6h->nexthdr;
 pckt->flow.proto = *protocol;
-*pkt_bytes = __builtin_bswap16(ip6h->payload_len);
+*pkt_bytes = bpf_ntohs(ip6h->payload_len);
 off += iph_len;
 if (*protocol == 45) {
 return XDP_DROP;
@@ -671,7 +672,7 @@ static int process_l3_headers_v4(struct packet_description *pckt,
 return XDP_DROP;
 *protocol = iph->protocol;
 pckt->flow.proto = *protocol;
-*pkt_bytes = __builtin_bswap16(iph->tot_len);
+*pkt_bytes = bpf_ntohs(iph->tot_len);
 off += 20;
 if (iph->frag_off & 65343)
 return XDP_DROP;
@@ -808,10 +809,10 @@ int balancer_ingress(struct xdp_md *ctx)
 nh_off = sizeof(struct eth_hdr);
 if (data + nh_off > data_end)
 return XDP_DROP;
-eth_proto = eth->eth_proto;
-if (eth_proto == 8)
+eth_proto = bpf_ntohs(eth->eth_proto);
+if (eth_proto == ETH_P_IP)
 return process_packet(data, nh_off, data_end, 0, ctx);
-else if (eth_proto == 56710)
+else if (eth_proto == ETH_P_IPV6)
 return process_packet(data, nh_off, data_end, 1, ctx);
 else
 return XDP_DROP;
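The hunks above replace __builtin_bswap16() with the bpf_ntohs()/bpf_htons() wrappers from the selftests' bpf_endian.h: an unconditional byte swap is only correct on little-endian targets and breaks these programs when built for s390, whereas the wrappers swap only when the target is little-endian. A minimal sketch of the intended pattern, assuming that header is on the include path:

#include <linux/types.h>
#include "bpf_endian.h"

/* Convert an on-wire (network byte order) 16-bit length to host order.
 * bpf_ntohs() is a no-op on a big-endian BPF target and a byte swap on
 * a little-endian one; __builtin_bswap16() would swap unconditionally.
 */
static __u16 wire_len_to_host(__be16 wire_len)
{
	return bpf_ntohs(wire_len);
}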
@@ -3417,6 +3417,94 @@ static struct btf_raw_test raw_tests[] = {
 .value_type_id = 1,
 .max_entries = 4,
 },
+/*
+* typedef int arr_t[16];
+* struct s {
+* arr_t *a;
+* };
+*/
+{
+.descr = "struct->ptr->typedef->array->int size resolution",
+.raw_types = {
+BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [1] */
+BTF_MEMBER_ENC(NAME_TBD, 2, 0),
+BTF_PTR_ENC(3), /* [2] */
+BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */
+BTF_TYPE_ARRAY_ENC(5, 5, 16), /* [4] */
+BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [5] */
+BTF_END_RAW,
+},
+BTF_STR_SEC("\0s\0a\0arr_t"),
+.map_type = BPF_MAP_TYPE_ARRAY,
+.map_name = "ptr_mod_chain_size_resolve_map",
+.key_size = sizeof(int),
+.value_size = sizeof(int) * 16,
+.key_type_id = 5 /* int */,
+.value_type_id = 3 /* arr_t */,
+.max_entries = 4,
+},
+/*
+* typedef int arr_t[16][8][4];
+* struct s {
+* arr_t *a;
+* };
+*/
+{
+.descr = "struct->ptr->typedef->multi-array->int size resolution",
+.raw_types = {
+BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [1] */
+BTF_MEMBER_ENC(NAME_TBD, 2, 0),
+BTF_PTR_ENC(3), /* [2] */
+BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */
+BTF_TYPE_ARRAY_ENC(5, 7, 16), /* [4] */
+BTF_TYPE_ARRAY_ENC(6, 7, 8), /* [5] */
+BTF_TYPE_ARRAY_ENC(7, 7, 4), /* [6] */
+BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [7] */
+BTF_END_RAW,
+},
+BTF_STR_SEC("\0s\0a\0arr_t"),
+.map_type = BPF_MAP_TYPE_ARRAY,
+.map_name = "multi_arr_size_resolve_map",
+.key_size = sizeof(int),
+.value_size = sizeof(int) * 16 * 8 * 4,
+.key_type_id = 7 /* int */,
+.value_type_id = 3 /* arr_t */,
+.max_entries = 4,
+},
+/*
+* typedef int int_t;
+* typedef int_t arr3_t[4];
+* typedef arr3_t arr2_t[8];
+* typedef arr2_t arr1_t[16];
+* struct s {
+* arr1_t *a;
+* };
+*/
+{
+.descr = "typedef/multi-arr mix size resolution",
+.raw_types = {
+BTF_STRUCT_ENC(NAME_TBD, 1, 8), /* [1] */
+BTF_MEMBER_ENC(NAME_TBD, 2, 0),
+BTF_PTR_ENC(3), /* [2] */
+BTF_TYPEDEF_ENC(NAME_TBD, 4), /* [3] */
+BTF_TYPE_ARRAY_ENC(5, 10, 16), /* [4] */
+BTF_TYPEDEF_ENC(NAME_TBD, 6), /* [5] */
+BTF_TYPE_ARRAY_ENC(7, 10, 8), /* [6] */
+BTF_TYPEDEF_ENC(NAME_TBD, 8), /* [7] */
+BTF_TYPE_ARRAY_ENC(9, 10, 4), /* [8] */
+BTF_TYPEDEF_ENC(NAME_TBD, 10), /* [9] */
+BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [10] */
+BTF_END_RAW,
+},
+BTF_STR_SEC("\0s\0a\0arr1_t\0arr2_t\0arr3_t\0int_t"),
+.map_type = BPF_MAP_TYPE_ARRAY,
+.map_name = "typedef_arra_mix_size_resolve_map",
+.key_size = sizeof(int),
+.value_size = sizeof(int) * 16 * 8 * 4,
+.key_type_id = 10 /* int */,
+.value_type_id = 3 /* arr_t */,
+.max_entries = 4,
+},
 
 }; /* struct btf_raw_test raw_tests[] */
 
@@ -92,3 +92,11 @@ int compare_map_keys(int map1_fd, int map2_fd);
 int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len);
 int extract_build_id(char *build_id, size_t size);
 void *spin_lock_thread(void *arg);
+
+#ifdef __x86_64__
+#define SYS_NANOSLEEP_KPROBE_NAME "__x64_sys_nanosleep"
+#elif defined(__s390x__)
+#define SYS_NANOSLEEP_KPROBE_NAME "__s390x_sys_nanosleep"
+#else
+#define SYS_NANOSLEEP_KPROBE_NAME "sys_nanosleep"
+#endif
@@ -86,7 +86,7 @@ struct bpf_test {
 int fixup_sk_storage_map[MAX_FIXUPS];
 const char *errstr;
 const char *errstr_unpriv;
-uint32_t retval, retval_unpriv, insn_processed;
+uint32_t insn_processed;
 int prog_len;
 enum {
 UNDEF,
@@ -95,16 +95,20 @@ struct bpf_test {
 } result, result_unpriv;
 enum bpf_prog_type prog_type;
 uint8_t flags;
-__u8 data[TEST_DATA_LEN];
 void (*fill_helper)(struct bpf_test *self);
 uint8_t runs;
-struct {
-uint32_t retval, retval_unpriv;
+#define bpf_testdata_struct_t \
+struct { \
+uint32_t retval, retval_unpriv; \
+union { \
+__u8 data[TEST_DATA_LEN]; \
+__u64 data64[TEST_DATA_LEN / 8]; \
+}; \
+}
 union {
-__u8 data[TEST_DATA_LEN];
-__u64 data64[TEST_DATA_LEN / 8];
+bpf_testdata_struct_t;
+bpf_testdata_struct_t retvals[MAX_TEST_RUNS];
 };
-} retvals[MAX_TEST_RUNS];
 enum bpf_attach_type expected_attach_type;
 };
 
@@ -949,17 +953,8 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
 uint32_t expected_val;
 int i;
 
-if (!test->runs) {
-expected_val = unpriv && test->retval_unpriv ?
-test->retval_unpriv : test->retval;
-
-err = do_prog_test_run(fd_prog, unpriv, expected_val,
-test->data, sizeof(test->data));
-if (err)
-run_errs++;
-else
-run_successes++;
-}
+if (!test->runs)
+test->runs = 1;
 
 for (i = 0; i < test->runs; i++) {
 if (unpriv && test->retvals[i].retval_unpriv)
@@ -226,7 +226,7 @@
 BPF_LD_MAP_FD(BPF_REG_1, 0),
 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
-BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
 BPF_EXIT_INSN(),
 },
 .fixup_map_array_ro = { 3 },
@@ -183,7 +183,7 @@
 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
 BPF_EXIT_INSN(),
-BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
+BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
 BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3),
 BPF_MOV64_IMM(BPF_REG_2, 0),
 BPF_MOV64_IMM(BPF_REG_3, 0x100000),
tools/testing/selftests/bpf/verifier/wide_access.c (new file, 73 lines)

@@ -0,0 +1,73 @@
+#define BPF_SOCK_ADDR_STORE(field, off, res, err) \
+{ \
+"wide store to bpf_sock_addr." #field "[" #off "]", \
+.insns = { \
+BPF_MOV64_IMM(BPF_REG_0, 1), \
+BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, \
+offsetof(struct bpf_sock_addr, field[off])), \
+BPF_EXIT_INSN(), \
+}, \
+.result = res, \
+.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, \
+.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG, \
+.errstr = err, \
+}
+
+/* user_ip6[0] is u64 aligned */
+BPF_SOCK_ADDR_STORE(user_ip6, 0, ACCEPT,
+NULL),
+BPF_SOCK_ADDR_STORE(user_ip6, 1, REJECT,
+"invalid bpf_context access off=12 size=8"),
+BPF_SOCK_ADDR_STORE(user_ip6, 2, ACCEPT,
+NULL),
+BPF_SOCK_ADDR_STORE(user_ip6, 3, REJECT,
+"invalid bpf_context access off=20 size=8"),
+
+/* msg_src_ip6[0] is _not_ u64 aligned */
+BPF_SOCK_ADDR_STORE(msg_src_ip6, 0, REJECT,
+"invalid bpf_context access off=44 size=8"),
+BPF_SOCK_ADDR_STORE(msg_src_ip6, 1, ACCEPT,
+NULL),
+BPF_SOCK_ADDR_STORE(msg_src_ip6, 2, REJECT,
+"invalid bpf_context access off=52 size=8"),
+BPF_SOCK_ADDR_STORE(msg_src_ip6, 3, REJECT,
+"invalid bpf_context access off=56 size=8"),
+
+#undef BPF_SOCK_ADDR_STORE
+
+#define BPF_SOCK_ADDR_LOAD(field, off, res, err) \
+{ \
+"wide load from bpf_sock_addr." #field "[" #off "]", \
+.insns = { \
+BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, \
+offsetof(struct bpf_sock_addr, field[off])), \
+BPF_MOV64_IMM(BPF_REG_0, 1), \
+BPF_EXIT_INSN(), \
+}, \
+.result = res, \
+.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, \
+.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG, \
+.errstr = err, \
+}
+
+/* user_ip6[0] is u64 aligned */
+BPF_SOCK_ADDR_LOAD(user_ip6, 0, ACCEPT,
+NULL),
+BPF_SOCK_ADDR_LOAD(user_ip6, 1, REJECT,
+"invalid bpf_context access off=12 size=8"),
+BPF_SOCK_ADDR_LOAD(user_ip6, 2, ACCEPT,
+NULL),
+BPF_SOCK_ADDR_LOAD(user_ip6, 3, REJECT,
+"invalid bpf_context access off=20 size=8"),
+
+/* msg_src_ip6[0] is _not_ u64 aligned */
+BPF_SOCK_ADDR_LOAD(msg_src_ip6, 0, REJECT,
+"invalid bpf_context access off=44 size=8"),
+BPF_SOCK_ADDR_LOAD(msg_src_ip6, 1, ACCEPT,
+NULL),
+BPF_SOCK_ADDR_LOAD(msg_src_ip6, 2, REJECT,
+"invalid bpf_context access off=52 size=8"),
+BPF_SOCK_ADDR_LOAD(msg_src_ip6, 3, REJECT,
+"invalid bpf_context access off=56 size=8"),
+
+#undef BPF_SOCK_ADDR_LOAD
(deleted file)

@@ -1,36 +0,0 @@
-#define BPF_SOCK_ADDR(field, off, res, err) \
-{ \
-"wide store to bpf_sock_addr." #field "[" #off "]", \
-.insns = { \
-BPF_MOV64_IMM(BPF_REG_0, 1), \
-BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, \
-offsetof(struct bpf_sock_addr, field[off])), \
-BPF_EXIT_INSN(), \
-}, \
-.result = res, \
-.prog_type = BPF_PROG_TYPE_CGROUP_SOCK_ADDR, \
-.expected_attach_type = BPF_CGROUP_UDP6_SENDMSG, \
-.errstr = err, \
-}
-
-/* user_ip6[0] is u64 aligned */
-BPF_SOCK_ADDR(user_ip6, 0, ACCEPT,
-NULL),
-BPF_SOCK_ADDR(user_ip6, 1, REJECT,
-"invalid bpf_context access off=12 size=8"),
-BPF_SOCK_ADDR(user_ip6, 2, ACCEPT,
-NULL),
-BPF_SOCK_ADDR(user_ip6, 3, REJECT,
-"invalid bpf_context access off=20 size=8"),
-
-/* msg_src_ip6[0] is _not_ u64 aligned */
-BPF_SOCK_ADDR(msg_src_ip6, 0, REJECT,
-"invalid bpf_context access off=44 size=8"),
-BPF_SOCK_ADDR(msg_src_ip6, 1, ACCEPT,
-NULL),
-BPF_SOCK_ADDR(msg_src_ip6, 2, REJECT,
-"invalid bpf_context access off=52 size=8"),
-BPF_SOCK_ADDR(msg_src_ip6, 3, REJECT,
-"invalid bpf_context access off=56 size=8"),
-
-#undef BPF_SOCK_ADDR