Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller: 1) Fix TTL offset calculation in mac80211 mesh code, from Peter Oh. 2) Fix races with procfs in ipt_CLUSTERIP, from Cong Wang. 3) Memory leak fix in lpm_trie BPF map code, from Yonghong Song. 4) Need to use GFP_ATOMIC in BPF cpumap allocations, from Jason Wang. 5) Fix potential deadlocks in netfilter getsockopt() code paths, from Paolo Abeni. 6) Netfilter stackpointer size checks really are needed to validate user input, from Florian Westphal. 7) Missing timer init in x_tables, from Paolo Abeni. 8) Don't use WQ_MEM_RECLAIM in mac80211 hwsim, from Johannes Berg. 9) When an ibmvnic device is brought down then back up again, it can be sent queue entries from a previous session, handle this properly instead of crashing. From Thomas Falcon. 10) Fix TCP checksum on LRO buffers in mlx5e, from Gal Pressman. 11) When we are dumping filters in cls_api, the output SKB is empty, and the filter we are dumping is too large for the space in the SKB, we should return -EMSGSIZE like other netlink dump operations do. Otherwise userland has no signal that it needs to increase the size of its read buffer. From Roman Kapl. 12) Several XDP fixes for virtio_net, from Jesper Dangaard Brouer. 13) Module refcount leak in netlink when a dump start fails, from Jason Donenfeld. 14) Handle sub-optimal GSO sizes better in TCP BBR congestion control, from Eric Dumazet. 15) Releasing bpf per-cpu arraymaps can take a long time, add a conditional scheduling point. From Eric Dumazet. 16) Implement retpolines for tail calls in x64 and arm64 bpf JITs. From Daniel Borkmann. 17) Fix page leak in gianfar driver, from Andy Spencer. 18) Missed clearing of estimator scratch buffer, from Eric Dumazet. 
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (76 commits) net_sched: gen_estimator: fix broken estimators based on percpu stats gianfar: simplify FCS handling and fix memory leak ipv6 sit: work around bogus gcc-8 -Wrestrict warning macvlan: fix use-after-free in macvlan_common_newlink() bpf, arm64: fix out of bounds access in tail call bpf, x64: implement retpoline for tail call rxrpc: Fix send in rxrpc_send_data_packet() net: aquantia: Fix error handling in aq_pci_probe() bpf: fix rcu lockdep warning for lpm_trie map_free callback bpf: add schedule points in percpu arrays management regulatory: add NUL to request alpha2 ibmvnic: Fix early release of login buffer net/smc9194: Remove bogus CONFIG_MAC reference net: ipv4: Set addr_type in hash_keys for forwarded case tcp_bbr: better deal with suboptimal GSO smsc75xx: fix smsc75xx_set_features() netlink: put module reference if dump start fails selftests/bpf/test_maps: exit child process without error in ENOMEM case selftests/bpf: update gitignore with test_libbpf_open selftests/bpf: tcpbpf_kern: use in6_* macros from glibc ..
This commit is contained in:
commit
9cb9c07d6b
108 changed files with 624 additions and 454 deletions
4
.gitignore
vendored
4
.gitignore
vendored
|
@ -127,3 +127,7 @@ all.config
|
|||
|
||||
# Kdevelop4
|
||||
*.kdev4
|
||||
|
||||
#Automatically generated by ASN.1 compiler
|
||||
net/ipv4/netfilter/nf_nat_snmp_basic-asn1.c
|
||||
net/ipv4/netfilter/nf_nat_snmp_basic-asn1.h
|
||||
|
|
|
@ -250,8 +250,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
|
|||
off = offsetof(struct bpf_array, map.max_entries);
|
||||
emit_a64_mov_i64(tmp, off, ctx);
|
||||
emit(A64_LDR32(tmp, r2, tmp), ctx);
|
||||
emit(A64_MOV(0, r3, r3), ctx);
|
||||
emit(A64_CMP(0, r3, tmp), ctx);
|
||||
emit(A64_B_(A64_COND_GE, jmp_offset), ctx);
|
||||
emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
|
||||
|
||||
/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
|
||||
* goto out;
|
||||
|
@ -259,7 +260,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
|
|||
*/
|
||||
emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
|
||||
emit(A64_CMP(1, tcc, tmp), ctx);
|
||||
emit(A64_B_(A64_COND_GT, jmp_offset), ctx);
|
||||
emit(A64_B_(A64_COND_HI, jmp_offset), ctx);
|
||||
emit(A64_ADD_I(1, tcc, tcc, 1), ctx);
|
||||
|
||||
/* prog = array->ptrs[index];
|
||||
|
|
|
@ -177,4 +177,41 @@ static inline void indirect_branch_prediction_barrier(void)
|
|||
}
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
/*
|
||||
* Below is used in the eBPF JIT compiler and emits the byte sequence
|
||||
* for the following assembly:
|
||||
*
|
||||
* With retpolines configured:
|
||||
*
|
||||
* callq do_rop
|
||||
* spec_trap:
|
||||
* pause
|
||||
* lfence
|
||||
* jmp spec_trap
|
||||
* do_rop:
|
||||
* mov %rax,(%rsp)
|
||||
* retq
|
||||
*
|
||||
* Without retpolines configured:
|
||||
*
|
||||
* jmp *%rax
|
||||
*/
|
||||
#ifdef CONFIG_RETPOLINE
|
||||
# define RETPOLINE_RAX_BPF_JIT_SIZE 17
|
||||
# define RETPOLINE_RAX_BPF_JIT() \
|
||||
EMIT1_off32(0xE8, 7); /* callq do_rop */ \
|
||||
/* spec_trap: */ \
|
||||
EMIT2(0xF3, 0x90); /* pause */ \
|
||||
EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \
|
||||
EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \
|
||||
/* do_rop: */ \
|
||||
EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */ \
|
||||
EMIT1(0xC3); /* retq */
|
||||
#else
|
||||
# define RETPOLINE_RAX_BPF_JIT_SIZE 2
|
||||
# define RETPOLINE_RAX_BPF_JIT() \
|
||||
EMIT2(0xFF, 0xE0); /* jmp *%rax */
|
||||
#endif
|
||||
|
||||
#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */
|
||||
|
|
|
@ -13,6 +13,7 @@
|
|||
#include <linux/if_vlan.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/set_memory.h>
|
||||
#include <asm/nospec-branch.h>
|
||||
#include <linux/bpf.h>
|
||||
|
||||
/*
|
||||
|
@ -290,7 +291,7 @@ static void emit_bpf_tail_call(u8 **pprog)
|
|||
EMIT2(0x89, 0xD2); /* mov edx, edx */
|
||||
EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
|
||||
offsetof(struct bpf_array, map.max_entries));
|
||||
#define OFFSET1 43 /* number of bytes to jump */
|
||||
#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */
|
||||
EMIT2(X86_JBE, OFFSET1); /* jbe out */
|
||||
label1 = cnt;
|
||||
|
||||
|
@ -299,7 +300,7 @@ static void emit_bpf_tail_call(u8 **pprog)
|
|||
*/
|
||||
EMIT2_off32(0x8B, 0x85, 36); /* mov eax, dword ptr [rbp + 36] */
|
||||
EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
|
||||
#define OFFSET2 32
|
||||
#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE)
|
||||
EMIT2(X86_JA, OFFSET2); /* ja out */
|
||||
label2 = cnt;
|
||||
EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
|
||||
|
@ -313,7 +314,7 @@ static void emit_bpf_tail_call(u8 **pprog)
|
|||
* goto out;
|
||||
*/
|
||||
EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
|
||||
#define OFFSET3 10
|
||||
#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE)
|
||||
EMIT2(X86_JE, OFFSET3); /* je out */
|
||||
label3 = cnt;
|
||||
|
||||
|
@ -326,7 +327,7 @@ static void emit_bpf_tail_call(u8 **pprog)
|
|||
* rdi == ctx (1st arg)
|
||||
* rax == prog->bpf_func + prologue_size
|
||||
*/
|
||||
EMIT2(0xFF, 0xE0); /* jmp rax */
|
||||
RETPOLINE_RAX_BPF_JIT();
|
||||
|
||||
/* out: */
|
||||
BUILD_BUG_ON(cnt - label1 != OFFSET1);
|
||||
|
|
|
@ -426,6 +426,8 @@ static int xgbe_pci_resume(struct pci_dev *pdev)
|
|||
struct net_device *netdev = pdata->netdev;
|
||||
int ret = 0;
|
||||
|
||||
XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
|
||||
|
||||
pdata->lpm_ctrl &= ~MDIO_CTRL1_LPOWER;
|
||||
XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_CTRL1, pdata->lpm_ctrl);
|
||||
|
||||
|
|
|
@ -226,6 +226,10 @@ static int aq_pci_probe(struct pci_dev *pdev,
|
|||
goto err_ioremap;
|
||||
|
||||
self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
|
||||
if (!self->aq_hw) {
|
||||
err = -ENOMEM;
|
||||
goto err_ioremap;
|
||||
}
|
||||
self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);
|
||||
|
||||
for (bar = 0; bar < 4; ++bar) {
|
||||
|
@ -235,19 +239,19 @@ static int aq_pci_probe(struct pci_dev *pdev,
|
|||
mmio_pa = pci_resource_start(pdev, bar);
|
||||
if (mmio_pa == 0U) {
|
||||
err = -EIO;
|
||||
goto err_ioremap;
|
||||
goto err_free_aq_hw;
|
||||
}
|
||||
|
||||
reg_sz = pci_resource_len(pdev, bar);
|
||||
if ((reg_sz <= 24 /*ATL_REGS_SIZE*/)) {
|
||||
err = -EIO;
|
||||
goto err_ioremap;
|
||||
goto err_free_aq_hw;
|
||||
}
|
||||
|
||||
self->aq_hw->mmio = ioremap_nocache(mmio_pa, reg_sz);
|
||||
if (!self->aq_hw->mmio) {
|
||||
err = -EIO;
|
||||
goto err_ioremap;
|
||||
goto err_free_aq_hw;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -255,7 +259,7 @@ static int aq_pci_probe(struct pci_dev *pdev,
|
|||
|
||||
if (bar == 4) {
|
||||
err = -EIO;
|
||||
goto err_ioremap;
|
||||
goto err_free_aq_hw;
|
||||
}
|
||||
|
||||
numvecs = min((u8)AQ_CFG_VECS_DEF,
|
||||
|
@ -290,6 +294,8 @@ err_register:
|
|||
aq_pci_free_irq_vectors(self);
|
||||
err_hwinit:
|
||||
iounmap(self->aq_hw->mmio);
|
||||
err_free_aq_hw:
|
||||
kfree(self->aq_hw);
|
||||
err_ioremap:
|
||||
free_netdev(ndev);
|
||||
err_pci_func:
|
||||
|
|
|
@ -2934,29 +2934,17 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
|
|||
{
|
||||
int size = lstatus & BD_LENGTH_MASK;
|
||||
struct page *page = rxb->page;
|
||||
bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
|
||||
|
||||
/* Remove the FCS from the packet length */
|
||||
if (last)
|
||||
size -= ETH_FCS_LEN;
|
||||
|
||||
if (likely(first)) {
|
||||
skb_put(skb, size);
|
||||
} else {
|
||||
/* the last fragments' length contains the full frame length */
|
||||
if (last)
|
||||
if (lstatus & BD_LFLAG(RXBD_LAST))
|
||||
size -= skb->len;
|
||||
|
||||
/* Add the last fragment if it contains something other than
|
||||
* the FCS, otherwise drop it and trim off any part of the FCS
|
||||
* that was already received.
|
||||
*/
|
||||
if (size > 0)
|
||||
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
|
||||
rxb->page_offset + RXBUF_ALIGNMENT,
|
||||
size, GFAR_RXB_TRUESIZE);
|
||||
else if (size < 0)
|
||||
pskb_trim(skb, skb->len + size);
|
||||
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
|
||||
rxb->page_offset + RXBUF_ALIGNMENT,
|
||||
size, GFAR_RXB_TRUESIZE);
|
||||
}
|
||||
|
||||
/* try reuse page */
|
||||
|
@ -3069,6 +3057,9 @@ static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
|
|||
if (priv->padding)
|
||||
skb_pull(skb, priv->padding);
|
||||
|
||||
/* Trim off the FCS */
|
||||
pskb_trim(skb, skb->len - ETH_FCS_LEN);
|
||||
|
||||
if (ndev->features & NETIF_F_RXCSUM)
|
||||
gfar_rx_checksum(skb, fcb);
|
||||
|
||||
|
|
|
@ -1901,6 +1901,11 @@ restart_poll:
|
|||
dev_kfree_skb_any(rx_buff->skb);
|
||||
remove_buff_from_pool(adapter, rx_buff);
|
||||
continue;
|
||||
} else if (!rx_buff->skb) {
|
||||
/* free the entry */
|
||||
next->rx_comp.first = 0;
|
||||
remove_buff_from_pool(adapter, rx_buff);
|
||||
continue;
|
||||
}
|
||||
|
||||
length = be32_to_cpu(next->rx_comp.len);
|
||||
|
@ -3755,7 +3760,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
|
|||
|
||||
dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
|
||||
DMA_BIDIRECTIONAL);
|
||||
release_login_buffer(adapter);
|
||||
dma_unmap_single(dev, adapter->login_rsp_buf_token,
|
||||
adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
|
||||
|
||||
|
@ -3786,6 +3790,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
|
|||
ibmvnic_remove(adapter->vdev);
|
||||
return -EIO;
|
||||
}
|
||||
release_login_buffer(adapter);
|
||||
complete(&adapter->init_done);
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -96,10 +96,10 @@ static void print_lyr_2_4_hdrs(struct trace_seq *p,
|
|||
"%pI4");
|
||||
} else if (ethertype.v == ETH_P_IPV6) {
|
||||
static const struct in6_addr full_ones = {
|
||||
.in6_u.u6_addr32 = {htonl(0xffffffff),
|
||||
htonl(0xffffffff),
|
||||
htonl(0xffffffff),
|
||||
htonl(0xffffffff)},
|
||||
.in6_u.u6_addr32 = {__constant_htonl(0xffffffff),
|
||||
__constant_htonl(0xffffffff),
|
||||
__constant_htonl(0xffffffff),
|
||||
__constant_htonl(0xffffffff)},
|
||||
};
|
||||
DECLARE_MASK_VAL(struct in6_addr, src_ipv6);
|
||||
DECLARE_MASK_VAL(struct in6_addr, dst_ipv6);
|
||||
|
|
|
@ -1768,13 +1768,16 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
|
|||
param->wq.linear = 1;
|
||||
}
|
||||
|
||||
static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
|
||||
static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
|
||||
struct mlx5e_rq_param *param)
|
||||
{
|
||||
void *rqc = param->rqc;
|
||||
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
|
||||
|
||||
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
|
||||
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
|
||||
|
||||
param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
|
||||
}
|
||||
|
||||
static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
|
||||
|
@ -2634,6 +2637,9 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
|
|||
struct mlx5e_cq *cq,
|
||||
struct mlx5e_cq_param *param)
|
||||
{
|
||||
param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
|
||||
param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev);
|
||||
|
||||
return mlx5e_alloc_cq_common(mdev, param, cq);
|
||||
}
|
||||
|
||||
|
@ -2645,7 +2651,7 @@ static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
|
|||
struct mlx5e_cq *cq = &drop_rq->cq;
|
||||
int err;
|
||||
|
||||
mlx5e_build_drop_rq_param(&rq_param);
|
||||
mlx5e_build_drop_rq_param(mdev, &rq_param);
|
||||
|
||||
err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
|
||||
if (err)
|
||||
|
@ -2994,8 +3000,8 @@ static int mlx5e_setup_tc_block(struct net_device *dev,
|
|||
}
|
||||
#endif
|
||||
|
||||
int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
|
||||
void *type_data)
|
||||
static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
|
||||
void *type_data)
|
||||
{
|
||||
switch (type) {
|
||||
#ifdef CONFIG_MLX5_ESWITCH
|
||||
|
|
|
@ -36,6 +36,7 @@
|
|||
#include <linux/tcp.h>
|
||||
#include <linux/bpf_trace.h>
|
||||
#include <net/busy_poll.h>
|
||||
#include <net/ip6_checksum.h>
|
||||
#include "en.h"
|
||||
#include "en_tc.h"
|
||||
#include "eswitch.h"
|
||||
|
@ -546,20 +547,33 @@ bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
|
|||
return true;
|
||||
}
|
||||
|
||||
static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
|
||||
{
|
||||
u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
|
||||
u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
|
||||
(l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
|
||||
|
||||
tcp->check = 0;
|
||||
tcp->psh = get_cqe_lro_tcppsh(cqe);
|
||||
|
||||
if (tcp_ack) {
|
||||
tcp->ack = 1;
|
||||
tcp->ack_seq = cqe->lro_ack_seq_num;
|
||||
tcp->window = cqe->lro_tcp_win;
|
||||
}
|
||||
}
|
||||
|
||||
static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
|
||||
u32 cqe_bcnt)
|
||||
{
|
||||
struct ethhdr *eth = (struct ethhdr *)(skb->data);
|
||||
struct tcphdr *tcp;
|
||||
int network_depth = 0;
|
||||
__wsum check;
|
||||
__be16 proto;
|
||||
u16 tot_len;
|
||||
void *ip_p;
|
||||
|
||||
u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
|
||||
u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
|
||||
(l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
|
||||
|
||||
proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
|
||||
|
||||
tot_len = cqe_bcnt - network_depth;
|
||||
|
@ -576,23 +590,30 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
|
|||
ipv4->check = 0;
|
||||
ipv4->check = ip_fast_csum((unsigned char *)ipv4,
|
||||
ipv4->ihl);
|
||||
|
||||
mlx5e_lro_update_tcp_hdr(cqe, tcp);
|
||||
check = csum_partial(tcp, tcp->doff * 4,
|
||||
csum_unfold((__force __sum16)cqe->check_sum));
|
||||
/* Almost done, don't forget the pseudo header */
|
||||
tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr,
|
||||
tot_len - sizeof(struct iphdr),
|
||||
IPPROTO_TCP, check);
|
||||
} else {
|
||||
u16 payload_len = tot_len - sizeof(struct ipv6hdr);
|
||||
struct ipv6hdr *ipv6 = ip_p;
|
||||
|
||||
tcp = ip_p + sizeof(struct ipv6hdr);
|
||||
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
|
||||
|
||||
ipv6->hop_limit = cqe->lro_min_ttl;
|
||||
ipv6->payload_len = cpu_to_be16(tot_len -
|
||||
sizeof(struct ipv6hdr));
|
||||
}
|
||||
ipv6->payload_len = cpu_to_be16(payload_len);
|
||||
|
||||
tcp->psh = get_cqe_lro_tcppsh(cqe);
|
||||
|
||||
if (tcp_ack) {
|
||||
tcp->ack = 1;
|
||||
tcp->ack_seq = cqe->lro_ack_seq_num;
|
||||
tcp->window = cqe->lro_tcp_win;
|
||||
mlx5e_lro_update_tcp_hdr(cqe, tcp);
|
||||
check = csum_partial(tcp, tcp->doff * 4,
|
||||
csum_unfold((__force __sum16)cqe->check_sum));
|
||||
/* Almost done, don't forget the pseudo header */
|
||||
tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len,
|
||||
IPPROTO_TCP, check);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -216,7 +216,8 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
|
|||
if (iph->protocol != IPPROTO_UDP)
|
||||
goto out;
|
||||
|
||||
udph = udp_hdr(skb);
|
||||
/* Don't assume skb_transport_header() was set */
|
||||
udph = (struct udphdr *)((u8 *)iph + 4 * iph->ihl);
|
||||
if (udph->dest != htons(9))
|
||||
goto out;
|
||||
|
||||
|
|
|
@ -2529,7 +2529,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
|
|||
if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
|
||||
attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
|
||||
} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
|
||||
if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
|
||||
if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
|
||||
tcf_vlan_push_prio(a))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
|
||||
|
|
|
@ -176,7 +176,7 @@ static inline u16 mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
|
|||
default:
|
||||
hlen = mlx5e_skb_l2_header_offset(skb);
|
||||
}
|
||||
return min_t(u16, hlen, skb->len);
|
||||
return min_t(u16, hlen, skb_headlen(skb));
|
||||
}
|
||||
|
||||
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
|
||||
|
|
|
@ -1529,6 +1529,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
|
|||
|
||||
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
|
||||
|
||||
/* Create steering drop counters for ingress and egress ACLs */
|
||||
if (vport_num && esw->mode == SRIOV_LEGACY)
|
||||
esw_vport_create_drop_counters(vport);
|
||||
|
||||
/* Restore old vport configuration */
|
||||
esw_apply_vport_conf(esw, vport);
|
||||
|
||||
|
@ -1545,10 +1549,6 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
|
|||
if (!vport_num)
|
||||
vport->info.trusted = true;
|
||||
|
||||
/* create steering drop counters for ingress and egress ACLs */
|
||||
if (vport_num && esw->mode == SRIOV_LEGACY)
|
||||
esw_vport_create_drop_counters(vport);
|
||||
|
||||
esw_vport_change_handle_locked(vport);
|
||||
|
||||
esw->enabled_vports++;
|
||||
|
|
|
@ -1429,7 +1429,8 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
|
|||
|
||||
if (xored_actions & (MLX5_FLOW_CONTEXT_ACTION_DROP |
|
||||
MLX5_FLOW_CONTEXT_ACTION_ENCAP |
|
||||
MLX5_FLOW_CONTEXT_ACTION_DECAP))
|
||||
MLX5_FLOW_CONTEXT_ACTION_DECAP |
|
||||
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
|
@ -1758,8 +1759,11 @@ search_again_locked:
|
|||
|
||||
/* Collect all fgs which has a matching match_criteria */
|
||||
err = build_match_list(&match_head, ft, spec);
|
||||
if (err)
|
||||
if (err) {
|
||||
if (take_write)
|
||||
up_write_ref_node(&ft->node);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
if (!take_write)
|
||||
up_read_ref_node(&ft->node);
|
||||
|
@ -1768,8 +1772,11 @@ search_again_locked:
|
|||
dest_num, version);
|
||||
free_match_list(&match_head);
|
||||
if (!IS_ERR(rule) ||
|
||||
(PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN))
|
||||
(PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
|
||||
if (take_write)
|
||||
up_write_ref_node(&ft->node);
|
||||
return rule;
|
||||
}
|
||||
|
||||
if (!take_write) {
|
||||
nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
|
||||
|
|
|
@ -34,6 +34,7 @@
|
|||
#include <linux/highmem.h>
|
||||
#include <rdma/mlx5-abi.h>
|
||||
#include "en.h"
|
||||
#include "clock.h"
|
||||
|
||||
enum {
|
||||
MLX5_CYCLES_SHIFT = 23
|
||||
|
|
|
@ -551,7 +551,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
|
|||
MLX5_SET(cmd_hca_cap,
|
||||
set_hca_cap,
|
||||
cache_line_128byte,
|
||||
cache_line_size() == 128 ? 1 : 0);
|
||||
cache_line_size() >= 128 ? 1 : 0);
|
||||
|
||||
if (MLX5_CAP_GEN_MAX(dev, dct))
|
||||
MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);
|
||||
|
|
|
@ -20,7 +20,7 @@ if NET_VENDOR_SMSC
|
|||
|
||||
config SMC9194
|
||||
tristate "SMC 9194 support"
|
||||
depends on (ISA || MAC && BROKEN)
|
||||
depends on ISA
|
||||
select CRC32
|
||||
---help---
|
||||
This is support for the SMC9xxx based Ethernet cards. Choose this
|
||||
|
|
|
@ -1451,7 +1451,7 @@ destroy_macvlan_port:
|
|||
/* the macvlan port may be freed by macvlan_uninit when fail to register.
|
||||
* so we destroy the macvlan port only when it's valid.
|
||||
*/
|
||||
if (create && macvlan_port_get_rtnl(dev))
|
||||
if (create && macvlan_port_get_rtnl(lowerdev))
|
||||
macvlan_port_destroy(port->dev);
|
||||
return err;
|
||||
}
|
||||
|
|
|
@ -954,10 +954,11 @@ static int smsc75xx_set_features(struct net_device *netdev,
|
|||
/* it's racing here! */
|
||||
|
||||
ret = smsc75xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
|
||||
if (ret < 0)
|
||||
if (ret < 0) {
|
||||
netdev_warn(dev->net, "Error writing RFE_CTL\n");
|
||||
|
||||
return ret;
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
|
||||
|
|
|
@ -443,12 +443,8 @@ static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
|
|||
sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
|
||||
|
||||
err = virtqueue_add_outbuf(sq->vq, sq->sg, 1, xdp->data, GFP_ATOMIC);
|
||||
if (unlikely(err)) {
|
||||
struct page *page = virt_to_head_page(xdp->data);
|
||||
|
||||
put_page(page);
|
||||
return false;
|
||||
}
|
||||
if (unlikely(err))
|
||||
return false; /* Caller handle free/refcnt */
|
||||
|
||||
return true;
|
||||
}
|
||||
|
@ -456,8 +452,18 @@ static bool __virtnet_xdp_xmit(struct virtnet_info *vi,
|
|||
static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_buff *xdp)
|
||||
{
|
||||
struct virtnet_info *vi = netdev_priv(dev);
|
||||
bool sent = __virtnet_xdp_xmit(vi, xdp);
|
||||
struct receive_queue *rq = vi->rq;
|
||||
struct bpf_prog *xdp_prog;
|
||||
bool sent;
|
||||
|
||||
/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
|
||||
* indicate XDP resources have been successfully allocated.
|
||||
*/
|
||||
xdp_prog = rcu_dereference(rq->xdp_prog);
|
||||
if (!xdp_prog)
|
||||
return -ENXIO;
|
||||
|
||||
sent = __virtnet_xdp_xmit(vi, xdp);
|
||||
if (!sent)
|
||||
return -ENOSPC;
|
||||
return 0;
|
||||
|
@ -546,8 +552,11 @@ static struct sk_buff *receive_small(struct net_device *dev,
|
|||
unsigned int buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) +
|
||||
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
|
||||
struct page *page = virt_to_head_page(buf);
|
||||
unsigned int delta = 0, err;
|
||||
unsigned int delta = 0;
|
||||
struct page *xdp_page;
|
||||
bool sent;
|
||||
int err;
|
||||
|
||||
len -= vi->hdr_len;
|
||||
|
||||
rcu_read_lock();
|
||||
|
@ -558,7 +567,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
|
|||
void *orig_data;
|
||||
u32 act;
|
||||
|
||||
if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
|
||||
if (unlikely(hdr->hdr.gso_type))
|
||||
goto err_xdp;
|
||||
|
||||
if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
|
||||
|
@ -596,16 +605,19 @@ static struct sk_buff *receive_small(struct net_device *dev,
|
|||
delta = orig_data - xdp.data;
|
||||
break;
|
||||
case XDP_TX:
|
||||
if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
|
||||
sent = __virtnet_xdp_xmit(vi, &xdp);
|
||||
if (unlikely(!sent)) {
|
||||
trace_xdp_exception(vi->dev, xdp_prog, act);
|
||||
else
|
||||
*xdp_xmit = true;
|
||||
goto err_xdp;
|
||||
}
|
||||
*xdp_xmit = true;
|
||||
rcu_read_unlock();
|
||||
goto xdp_xmit;
|
||||
case XDP_REDIRECT:
|
||||
err = xdp_do_redirect(dev, &xdp, xdp_prog);
|
||||
if (!err)
|
||||
*xdp_xmit = true;
|
||||
if (err)
|
||||
goto err_xdp;
|
||||
*xdp_xmit = true;
|
||||
rcu_read_unlock();
|
||||
goto xdp_xmit;
|
||||
default:
|
||||
|
@ -677,7 +689,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
|
|||
struct bpf_prog *xdp_prog;
|
||||
unsigned int truesize;
|
||||
unsigned int headroom = mergeable_ctx_to_headroom(ctx);
|
||||
int err;
|
||||
bool sent;
|
||||
|
||||
head_skb = NULL;
|
||||
|
||||
|
@ -746,20 +758,18 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
|
|||
}
|
||||
break;
|
||||
case XDP_TX:
|
||||
if (unlikely(!__virtnet_xdp_xmit(vi, &xdp)))
|
||||
sent = __virtnet_xdp_xmit(vi, &xdp);
|
||||
if (unlikely(!sent)) {
|
||||
trace_xdp_exception(vi->dev, xdp_prog, act);
|
||||
else
|
||||
*xdp_xmit = true;
|
||||
if (unlikely(xdp_page != page))
|
||||
put_page(xdp_page);
|
||||
goto err_xdp;
|
||||
}
|
||||
*xdp_xmit = true;
|
||||
if (unlikely(xdp_page != page))
|
||||
goto err_xdp;
|
||||
rcu_read_unlock();
|
||||
goto xdp_xmit;
|
||||
case XDP_REDIRECT:
|
||||
err = xdp_do_redirect(dev, &xdp, xdp_prog);
|
||||
if (!err)
|
||||
*xdp_xmit = true;
|
||||
rcu_read_unlock();
|
||||
goto xdp_xmit;
|
||||
default:
|
||||
bpf_warn_invalid_xdp_action(act);
|
||||
case XDP_ABORTED:
|
||||
|
|
|
@ -3516,7 +3516,7 @@ static int __init init_mac80211_hwsim(void)
|
|||
|
||||
spin_lock_init(&hwsim_radio_lock);
|
||||
|
||||
hwsim_wq = alloc_workqueue("hwsim_wq",WQ_MEM_RECLAIM,0);
|
||||
hwsim_wq = alloc_workqueue("hwsim_wq", 0, 0);
|
||||
if (!hwsim_wq)
|
||||
return -ENOMEM;
|
||||
rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params);
|
||||
|
|
|
@ -4149,7 +4149,7 @@ void ieee80211_sta_uapsd_trigger(struct ieee80211_sta *sta, u8 tid);
|
|||
* The TX headroom reserved by mac80211 for its own tx_status functions.
|
||||
* This is enough for the radiotap header.
|
||||
*/
|
||||
#define IEEE80211_TX_STATUS_HEADROOM 14
|
||||
#define IEEE80211_TX_STATUS_HEADROOM ALIGN(14, 4)
|
||||
|
||||
/**
|
||||
* ieee80211_sta_set_buffered - inform mac80211 about driver-buffered frames
|
||||
|
|
|
@ -78,7 +78,7 @@ struct regulatory_request {
|
|||
int wiphy_idx;
|
||||
enum nl80211_reg_initiator initiator;
|
||||
enum nl80211_user_reg_hint_type user_reg_hint_type;
|
||||
char alpha2[2];
|
||||
char alpha2[3];
|
||||
enum nl80211_dfs_regions dfs_region;
|
||||
bool intersect;
|
||||
bool processed;
|
||||
|
|
|
@ -26,8 +26,10 @@ static void bpf_array_free_percpu(struct bpf_array *array)
|
|||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < array->map.max_entries; i++)
|
||||
for (i = 0; i < array->map.max_entries; i++) {
|
||||
free_percpu(array->pptrs[i]);
|
||||
cond_resched();
|
||||
}
|
||||
}
|
||||
|
||||
static int bpf_array_alloc_percpu(struct bpf_array *array)
|
||||
|
@ -43,6 +45,7 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
|
|||
return -ENOMEM;
|
||||
}
|
||||
array->pptrs[i] = ptr;
|
||||
cond_resched();
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@ -73,11 +76,11 @@ static int array_map_alloc_check(union bpf_attr *attr)
|
|||
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
|
||||
{
|
||||
bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
|
||||
int numa_node = bpf_map_attr_numa_node(attr);
|
||||
int ret, numa_node = bpf_map_attr_numa_node(attr);
|
||||
u32 elem_size, index_mask, max_entries;
|
||||
bool unpriv = !capable(CAP_SYS_ADMIN);
|
||||
u64 cost, array_size, mask64;
|
||||
struct bpf_array *array;
|
||||
u64 array_size, mask64;
|
||||
|
||||
elem_size = round_up(attr->value_size, 8);
|
||||
|
||||
|
@ -109,8 +112,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
|
|||
array_size += (u64) max_entries * elem_size;
|
||||
|
||||
/* make sure there is no u32 overflow later in round_up() */
|
||||
if (array_size >= U32_MAX - PAGE_SIZE)
|
||||
cost = array_size;
|
||||
if (cost >= U32_MAX - PAGE_SIZE)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
if (percpu) {
|
||||
cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
|
||||
if (cost >= U32_MAX - PAGE_SIZE)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
|
||||
|
||||
ret = bpf_map_precharge_memlock(cost);
|
||||
if (ret < 0)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
/* allocate all map elements and zero-initialize them */
|
||||
array = bpf_map_area_alloc(array_size, numa_node);
|
||||
|
@ -121,20 +135,13 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
|
|||
|
||||
/* copy mandatory map attributes */
|
||||
bpf_map_init_from_attr(&array->map, attr);
|
||||
array->map.pages = cost;
|
||||
array->elem_size = elem_size;
|
||||
|
||||
if (!percpu)
|
||||
goto out;
|
||||
|
||||
array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
|
||||
|
||||
if (array_size >= U32_MAX - PAGE_SIZE ||
|
||||
bpf_array_alloc_percpu(array)) {
|
||||
if (percpu && bpf_array_alloc_percpu(array)) {
|
||||
bpf_map_area_free(array);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
out:
|
||||
array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
|
||||
|
||||
return &array->map;
|
||||
}
|
||||
|
|
|
@ -1590,7 +1590,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
|
|||
* so always copy 'cnt' prog_ids to the user.
|
||||
* In a rare race the user will see zero prog_ids
|
||||
*/
|
||||
ids = kcalloc(cnt, sizeof(u32), GFP_USER);
|
||||
ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
|
||||
if (!ids)
|
||||
return -ENOMEM;
|
||||
rcu_read_lock();
|
||||
|
|
|
@ -334,7 +334,7 @@ static int cpu_map_kthread_run(void *data)
|
|||
static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
|
||||
int map_id)
|
||||
{
|
||||
gfp_t gfp = GFP_ATOMIC|__GFP_NOWARN;
|
||||
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
|
||||
struct bpf_cpu_map_entry *rcpu;
|
||||
int numa, err;
|
||||
|
||||
|
|
|
@ -555,7 +555,10 @@ static void trie_free(struct bpf_map *map)
|
|||
struct lpm_trie_node __rcu **slot;
|
||||
struct lpm_trie_node *node;
|
||||
|
||||
raw_spin_lock(&trie->lock);
|
||||
/* Wait for outstanding programs to complete
|
||||
* update/lookup/delete/get_next_key and free the trie.
|
||||
*/
|
||||
synchronize_rcu();
|
||||
|
||||
/* Always start at the root and walk down to a node that has no
|
||||
* children. Then free that node, nullify its reference in the parent
|
||||
|
@ -566,10 +569,9 @@ static void trie_free(struct bpf_map *map)
|
|||
slot = &trie->root;
|
||||
|
||||
for (;;) {
|
||||
node = rcu_dereference_protected(*slot,
|
||||
lockdep_is_held(&trie->lock));
|
||||
node = rcu_dereference_protected(*slot, 1);
|
||||
if (!node)
|
||||
goto unlock;
|
||||
goto out;
|
||||
|
||||
if (rcu_access_pointer(node->child[0])) {
|
||||
slot = &node->child[0];
|
||||
|
@ -587,8 +589,8 @@ static void trie_free(struct bpf_map *map)
|
|||
}
|
||||
}
|
||||
|
||||
unlock:
|
||||
raw_spin_unlock(&trie->lock);
|
||||
out:
|
||||
kfree(trie);
|
||||
}
|
||||
|
||||
static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
|
||||
|
|
|
@ -521,8 +521,8 @@ static struct smap_psock *smap_init_psock(struct sock *sock,
|
|||
static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
|
||||
{
|
||||
struct bpf_stab *stab;
|
||||
int err = -EINVAL;
|
||||
u64 cost;
|
||||
int err;
|
||||
|
||||
if (!capable(CAP_NET_ADMIN))
|
||||
return ERR_PTR(-EPERM);
|
||||
|
@ -547,6 +547,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
|
|||
|
||||
/* make sure page count doesn't overflow */
|
||||
cost = (u64) stab->map.max_entries * sizeof(struct sock *);
|
||||
err = -EINVAL;
|
||||
if (cost >= U32_MAX - PAGE_SIZE)
|
||||
goto free_stab;
|
||||
|
||||
|
|
|
@ -872,6 +872,8 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
|
|||
return -EINVAL;
|
||||
if (copy_from_user(&query, uquery, sizeof(query)))
|
||||
return -EFAULT;
|
||||
if (query.ids_len > BPF_TRACE_MAX_PROGS)
|
||||
return -E2BIG;
|
||||
|
||||
mutex_lock(&bpf_event_mutex);
|
||||
ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
|
||||
|
|
|
@ -187,17 +187,17 @@ static int ebt_among_mt_check(const struct xt_mtchk_param *par)
|
|||
expected_length += ebt_mac_wormhash_size(wh_src);
|
||||
|
||||
if (em->match_size != EBT_ALIGN(expected_length)) {
|
||||
pr_info("wrong size: %d against expected %d, rounded to %zd\n",
|
||||
em->match_size, expected_length,
|
||||
EBT_ALIGN(expected_length));
|
||||
pr_err_ratelimited("wrong size: %d against expected %d, rounded to %zd\n",
|
||||
em->match_size, expected_length,
|
||||
EBT_ALIGN(expected_length));
|
||||
return -EINVAL;
|
||||
}
|
||||
if (wh_dst && (err = ebt_mac_wormhash_check_integrity(wh_dst))) {
|
||||
pr_info("dst integrity fail: %x\n", -err);
|
||||
pr_err_ratelimited("dst integrity fail: %x\n", -err);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (wh_src && (err = ebt_mac_wormhash_check_integrity(wh_src))) {
|
||||
pr_info("src integrity fail: %x\n", -err);
|
||||
pr_err_ratelimited("src integrity fail: %x\n", -err);
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
|
|
|
@ -72,8 +72,8 @@ static int ebt_limit_mt_check(const struct xt_mtchk_param *par)
|
|||
/* Check for overflow. */
|
||||
if (info->burst == 0 ||
|
||||
user2credits(info->avg * info->burst) < user2credits(info->avg)) {
|
||||
pr_info("overflow, try lower: %u/%u\n",
|
||||
info->avg, info->burst);
|
||||
pr_info_ratelimited("overflow, try lower: %u/%u\n",
|
||||
info->avg, info->burst);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
|
|
@ -3381,17 +3381,13 @@ BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
|
|||
struct sock *sk = bpf_sock->sk;
|
||||
int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
|
||||
|
||||
if (!sk_fullsock(sk))
|
||||
if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
|
||||
return -EINVAL;
|
||||
|
||||
#ifdef CONFIG_INET
|
||||
if (val)
|
||||
tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
|
||||
|
||||
return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
|
||||
#else
|
||||
return -EINVAL;
|
||||
#endif
|
||||
}
|
||||
|
||||
static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
|
||||
|
|
|
@ -66,6 +66,7 @@ struct net_rate_estimator {
|
|||
static void est_fetch_counters(struct net_rate_estimator *e,
|
||||
struct gnet_stats_basic_packed *b)
|
||||
{
|
||||
memset(b, 0, sizeof(*b));
|
||||
if (e->stats_lock)
|
||||
spin_lock(e->stats_lock);
|
||||
|
||||
|
|
|
@ -1567,10 +1567,7 @@ int ip_getsockopt(struct sock *sk, int level,
|
|||
if (get_user(len, optlen))
|
||||
return -EFAULT;
|
||||
|
||||
lock_sock(sk);
|
||||
err = nf_getsockopt(sk, PF_INET, optname, optval,
|
||||
&len);
|
||||
release_sock(sk);
|
||||
err = nf_getsockopt(sk, PF_INET, optname, optval, &len);
|
||||
if (err >= 0)
|
||||
err = put_user(len, optlen);
|
||||
return err;
|
||||
|
@ -1602,9 +1599,7 @@ int compat_ip_getsockopt(struct sock *sk, int level, int optname,
|
|||
if (get_user(len, optlen))
|
||||
return -EFAULT;
|
||||
|
||||
lock_sock(sk);
|
||||
err = compat_nf_getsockopt(sk, PF_INET, optname, optval, &len);
|
||||
release_sock(sk);
|
||||
if (err >= 0)
|
||||
err = put_user(len, optlen);
|
||||
return err;
|
||||
|
|
|
@ -252,6 +252,10 @@ unsigned int arpt_do_table(struct sk_buff *skb,
|
|||
}
|
||||
if (table_base + v
|
||||
!= arpt_next_entry(e)) {
|
||||
if (unlikely(stackidx >= private->stacksize)) {
|
||||
verdict = NF_DROP;
|
||||
break;
|
||||
}
|
||||
jumpstack[stackidx++] = e;
|
||||
}
|
||||
|
||||
|
|
|
@ -330,8 +330,13 @@ ipt_do_table(struct sk_buff *skb,
|
|||
continue;
|
||||
}
|
||||
if (table_base + v != ipt_next_entry(e) &&
|
||||
!(e->ip.flags & IPT_F_GOTO))
|
||||
!(e->ip.flags & IPT_F_GOTO)) {
|
||||
if (unlikely(stackidx >= private->stacksize)) {
|
||||
verdict = NF_DROP;
|
||||
break;
|
||||
}
|
||||
jumpstack[stackidx++] = e;
|
||||
}
|
||||
|
||||
e = get_entry(table_base, v);
|
||||
continue;
|
||||
|
|
|
@ -107,12 +107,6 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
|
|||
|
||||
local_bh_disable();
|
||||
if (refcount_dec_and_lock(&c->entries, &cn->lock)) {
|
||||
list_del_rcu(&c->list);
|
||||
spin_unlock(&cn->lock);
|
||||
local_bh_enable();
|
||||
|
||||
unregister_netdevice_notifier(&c->notifier);
|
||||
|
||||
/* In case anyone still accesses the file, the open/close
|
||||
* functions are also incrementing the refcount on their own,
|
||||
* so it's safe to remove the entry even if it's in use. */
|
||||
|
@ -120,6 +114,12 @@ clusterip_config_entry_put(struct net *net, struct clusterip_config *c)
|
|||
if (cn->procdir)
|
||||
proc_remove(c->pde);
|
||||
#endif
|
||||
list_del_rcu(&c->list);
|
||||
spin_unlock(&cn->lock);
|
||||
local_bh_enable();
|
||||
|
||||
unregister_netdevice_notifier(&c->notifier);
|
||||
|
||||
return;
|
||||
}
|
||||
local_bh_enable();
|
||||
|
@ -154,8 +154,12 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
|
|||
#endif
|
||||
if (unlikely(!refcount_inc_not_zero(&c->refcount)))
|
||||
c = NULL;
|
||||
else if (entry)
|
||||
refcount_inc(&c->entries);
|
||||
else if (entry) {
|
||||
if (unlikely(!refcount_inc_not_zero(&c->entries))) {
|
||||
clusterip_config_put(c);
|
||||
c = NULL;
|
||||
}
|
||||
}
|
||||
}
|
||||
rcu_read_unlock_bh();
|
||||
|
||||
|
|
|
@ -98,17 +98,15 @@ static int ecn_tg_check(const struct xt_tgchk_param *par)
|
|||
const struct ipt_ECN_info *einfo = par->targinfo;
|
||||
const struct ipt_entry *e = par->entryinfo;
|
||||
|
||||
if (einfo->operation & IPT_ECN_OP_MASK) {
|
||||
pr_info("unsupported ECN operation %x\n", einfo->operation);
|
||||
if (einfo->operation & IPT_ECN_OP_MASK)
|
||||
return -EINVAL;
|
||||
}
|
||||
if (einfo->ip_ect & ~IPT_ECN_IP_MASK) {
|
||||
pr_info("new ECT codepoint %x out of mask\n", einfo->ip_ect);
|
||||
|
||||
if (einfo->ip_ect & ~IPT_ECN_IP_MASK)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if ((einfo->operation & (IPT_ECN_OP_SET_ECE|IPT_ECN_OP_SET_CWR)) &&
|
||||
(e->ip.proto != IPPROTO_TCP || (e->ip.invflags & XT_INV_PROTO))) {
|
||||
pr_info("cannot use TCP operations on a non-tcp rule\n");
|
||||
pr_info_ratelimited("cannot use operation on non-tcp rule\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
|
|
|
@ -74,13 +74,13 @@ static int reject_tg_check(const struct xt_tgchk_param *par)
|
|||
const struct ipt_entry *e = par->entryinfo;
|
||||
|
||||
if (rejinfo->with == IPT_ICMP_ECHOREPLY) {
|
||||
pr_info("ECHOREPLY no longer supported.\n");
|
||||
pr_info_ratelimited("ECHOREPLY no longer supported.\n");
|
||||
return -EINVAL;
|
||||
} else if (rejinfo->with == IPT_TCP_RESET) {
|
||||
/* Must specify that it's a TCP packet */
|
||||
if (e->ip.proto != IPPROTO_TCP ||
|
||||
(e->ip.invflags & XT_INV_PROTO)) {
|
||||
pr_info("TCP_RESET invalid for non-tcp\n");
|
||||
pr_info_ratelimited("TCP_RESET invalid for non-tcp\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -105,14 +105,14 @@ static int rpfilter_check(const struct xt_mtchk_param *par)
|
|||
const struct xt_rpfilter_info *info = par->matchinfo;
|
||||
unsigned int options = ~XT_RPFILTER_OPTION_MASK;
|
||||
if (info->flags & options) {
|
||||
pr_info("unknown options encountered");
|
||||
pr_info_ratelimited("unknown options\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (strcmp(par->table, "mangle") != 0 &&
|
||||
strcmp(par->table, "raw") != 0) {
|
||||
pr_info("match only valid in the \'raw\' "
|
||||
"or \'mangle\' tables, not \'%s\'.\n", par->table);
|
||||
pr_info_ratelimited("only valid in \'raw\' or \'mangle\' table, not \'%s\'\n",
|
||||
par->table);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
|
|
@ -1826,6 +1826,8 @@ int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
|
|||
return skb_get_hash_raw(skb) >> 1;
|
||||
memset(&hash_keys, 0, sizeof(hash_keys));
|
||||
skb_flow_dissect_flow_keys(skb, &keys, flag);
|
||||
|
||||
hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
|
||||
hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
|
||||
hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
|
||||
hash_keys.ports.src = keys.ports.src;
|
||||
|
|
|
@ -1730,7 +1730,7 @@ u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
|
|||
*/
|
||||
segs = max_t(u32, bytes / mss_now, min_tso_segs);
|
||||
|
||||
return min_t(u32, segs, sk->sk_gso_max_segs);
|
||||
return segs;
|
||||
}
|
||||
EXPORT_SYMBOL(tcp_tso_autosize);
|
||||
|
||||
|
@ -1742,9 +1742,10 @@ static u32 tcp_tso_segs(struct sock *sk, unsigned int mss_now)
|
|||
const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
|
||||
u32 tso_segs = ca_ops->tso_segs_goal ? ca_ops->tso_segs_goal(sk) : 0;
|
||||
|
||||
return tso_segs ? :
|
||||
tcp_tso_autosize(sk, mss_now,
|
||||
sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
|
||||
if (!tso_segs)
|
||||
tso_segs = tcp_tso_autosize(sk, mss_now,
|
||||
sock_net(sk)->ipv4.sysctl_tcp_min_tso_segs);
|
||||
return min_t(u32, tso_segs, sk->sk_gso_max_segs);
|
||||
}
|
||||
|
||||
/* Returns the portion of skb which can be sent right away */
|
||||
|
|
|
@ -1367,10 +1367,7 @@ int ipv6_getsockopt(struct sock *sk, int level, int optname,
|
|||
if (get_user(len, optlen))
|
||||
return -EFAULT;
|
||||
|
||||
lock_sock(sk);
|
||||
err = nf_getsockopt(sk, PF_INET6, optname, optval,
|
||||
&len);
|
||||
release_sock(sk);
|
||||
err = nf_getsockopt(sk, PF_INET6, optname, optval, &len);
|
||||
if (err >= 0)
|
||||
err = put_user(len, optlen);
|
||||
}
|
||||
|
@ -1409,10 +1406,7 @@ int compat_ipv6_getsockopt(struct sock *sk, int level, int optname,
|
|||
if (get_user(len, optlen))
|
||||
return -EFAULT;
|
||||
|
||||
lock_sock(sk);
|
||||
err = compat_nf_getsockopt(sk, PF_INET6,
|
||||
optname, optval, &len);
|
||||
release_sock(sk);
|
||||
err = compat_nf_getsockopt(sk, PF_INET6, optname, optval, &len);
|
||||
if (err >= 0)
|
||||
err = put_user(len, optlen);
|
||||
}
|
||||
|
|
|
@ -352,6 +352,10 @@ ip6t_do_table(struct sk_buff *skb,
|
|||
}
|
||||
if (table_base + v != ip6t_next_entry(e) &&
|
||||
!(e->ipv6.flags & IP6T_F_GOTO)) {
|
||||
if (unlikely(stackidx >= private->stacksize)) {
|
||||
verdict = NF_DROP;
|
||||
break;
|
||||
}
|
||||
jumpstack[stackidx++] = e;
|
||||
}
|
||||
|
||||
|
|
|
@ -85,14 +85,14 @@ static int reject_tg6_check(const struct xt_tgchk_param *par)
|
|||
const struct ip6t_entry *e = par->entryinfo;
|
||||
|
||||
if (rejinfo->with == IP6T_ICMP6_ECHOREPLY) {
|
||||
pr_info("ECHOREPLY is not supported.\n");
|
||||
pr_info_ratelimited("ECHOREPLY is not supported\n");
|
||||
return -EINVAL;
|
||||
} else if (rejinfo->with == IP6T_TCP_RESET) {
|
||||
/* Must specify that it's a TCP packet */
|
||||
if (!(e->ipv6.flags & IP6T_F_PROTO) ||
|
||||
e->ipv6.proto != IPPROTO_TCP ||
|
||||
(e->ipv6.invflags & XT_INV_PROTO)) {
|
||||
pr_info("TCP_RESET illegal for non-tcp\n");
|
||||
pr_info_ratelimited("TCP_RESET illegal for non-tcp\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -103,14 +103,14 @@ static int rpfilter_check(const struct xt_mtchk_param *par)
|
|||
unsigned int options = ~XT_RPFILTER_OPTION_MASK;
|
||||
|
||||
if (info->flags & options) {
|
||||
pr_info("unknown options encountered");
|
||||
pr_info_ratelimited("unknown options\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (strcmp(par->table, "mangle") != 0 &&
|
||||
strcmp(par->table, "raw") != 0) {
|
||||
pr_info("match only valid in the \'raw\' "
|
||||
"or \'mangle\' tables, not \'%s\'.\n", par->table);
|
||||
pr_info_ratelimited("only valid in \'raw\' or \'mangle\' table, not \'%s\'\n",
|
||||
par->table);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
|
|
@ -122,12 +122,14 @@ static int srh_mt6_check(const struct xt_mtchk_param *par)
|
|||
const struct ip6t_srh *srhinfo = par->matchinfo;
|
||||
|
||||
if (srhinfo->mt_flags & ~IP6T_SRH_MASK) {
|
||||
pr_err("unknown srh match flags %X\n", srhinfo->mt_flags);
|
||||
pr_info_ratelimited("unknown srh match flags %X\n",
|
||||
srhinfo->mt_flags);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (srhinfo->mt_invflags & ~IP6T_SRH_INV_MASK) {
|
||||
pr_err("unknown srh invflags %X\n", srhinfo->mt_invflags);
|
||||
pr_info_ratelimited("unknown srh invflags %X\n",
|
||||
srhinfo->mt_invflags);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
|
|
@ -182,7 +182,7 @@ static void ipip6_tunnel_clone_6rd(struct net_device *dev, struct sit_net *sitn)
|
|||
#ifdef CONFIG_IPV6_SIT_6RD
|
||||
struct ip_tunnel *t = netdev_priv(dev);
|
||||
|
||||
if (t->dev == sitn->fb_tunnel_dev) {
|
||||
if (dev == sitn->fb_tunnel_dev) {
|
||||
ipv6_addr_set(&t->ip6rd.prefix, htonl(0x20020000), 0, 0, 0);
|
||||
t->ip6rd.relay_prefix = 0;
|
||||
t->ip6rd.prefixlen = 16;
|
||||
|
|
|
@ -8,6 +8,7 @@
|
|||
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
|
||||
* Copyright 2007-2010, Intel Corporation
|
||||
* Copyright(c) 2015-2017 Intel Deutschland GmbH
|
||||
* Copyright (C) 2018 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
|
@ -304,9 +305,6 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
|
|||
* driver so reject the timeout update.
|
||||
*/
|
||||
status = WLAN_STATUS_REQUEST_DECLINED;
|
||||
ieee80211_send_addba_resp(sta->sdata, sta->sta.addr,
|
||||
tid, dialog_token, status,
|
||||
1, buf_size, timeout);
|
||||
goto end;
|
||||
}
|
||||
|
||||
|
|
|
@ -2892,7 +2892,7 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
|
|||
}
|
||||
if (beacon->probe_resp_len) {
|
||||
new_beacon->probe_resp_len = beacon->probe_resp_len;
|
||||
beacon->probe_resp = pos;
|
||||
new_beacon->probe_resp = pos;
|
||||
memcpy(pos, beacon->probe_resp, beacon->probe_resp_len);
|
||||
pos += beacon->probe_resp_len;
|
||||
}
|
||||
|
|
|
@ -1467,7 +1467,7 @@ struct ieee802_11_elems {
|
|||
const struct ieee80211_timeout_interval_ie *timeout_int;
|
||||
const u8 *opmode_notif;
|
||||
const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
|
||||
const struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie;
|
||||
struct ieee80211_mesh_chansw_params_ie *mesh_chansw_params_ie;
|
||||
const struct ieee80211_bss_max_idle_period_ie *max_idle_period_ie;
|
||||
|
||||
/* length of them, respectively */
|
||||
|
|
|
@ -1255,13 +1255,12 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
|
|||
}
|
||||
|
||||
static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata,
|
||||
struct ieee80211_mgmt *mgmt, size_t len)
|
||||
struct ieee80211_mgmt *mgmt, size_t len,
|
||||
struct ieee802_11_elems *elems)
|
||||
{
|
||||
struct ieee80211_mgmt *mgmt_fwd;
|
||||
struct sk_buff *skb;
|
||||
struct ieee80211_local *local = sdata->local;
|
||||
u8 *pos = mgmt->u.action.u.chan_switch.variable;
|
||||
size_t offset_ttl;
|
||||
|
||||
skb = dev_alloc_skb(local->tx_headroom + len);
|
||||
if (!skb)
|
||||
|
@ -1269,13 +1268,9 @@ static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata,
|
|||
skb_reserve(skb, local->tx_headroom);
|
||||
mgmt_fwd = skb_put(skb, len);
|
||||
|
||||
/* offset_ttl is based on whether the secondary channel
|
||||
* offset is available or not. Subtract 1 from the mesh TTL
|
||||
* and disable the initiator flag before forwarding.
|
||||
*/
|
||||
offset_ttl = (len < 42) ? 7 : 10;
|
||||
*(pos + offset_ttl) -= 1;
|
||||
*(pos + offset_ttl + 1) &= ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
|
||||
elems->mesh_chansw_params_ie->mesh_ttl--;
|
||||
elems->mesh_chansw_params_ie->mesh_flags &=
|
||||
~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
|
||||
|
||||
memcpy(mgmt_fwd, mgmt, len);
|
||||
eth_broadcast_addr(mgmt_fwd->da);
|
||||
|
@ -1323,7 +1318,7 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
|
|||
|
||||
/* forward or re-broadcast the CSA frame */
|
||||
if (fwd_csa) {
|
||||
if (mesh_fwd_csa_frame(sdata, mgmt, len) < 0)
|
||||
if (mesh_fwd_csa_frame(sdata, mgmt, len, &elems) < 0)
|
||||
mcsa_dbg(sdata, "Failed to forward the CSA frame");
|
||||
}
|
||||
}
|
||||
|
|
|
@ -8,6 +8,7 @@
|
|||
* Copyright 2007, Michael Wu <flamingice@sourmilk.net>
|
||||
* Copyright 2007-2008, Intel Corporation
|
||||
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
|
||||
* Copyright (C) 2018 Intel Corporation
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
|
@ -27,7 +28,7 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
|
|||
u32 sta_flags, u8 *bssid,
|
||||
struct ieee80211_csa_ie *csa_ie)
|
||||
{
|
||||
enum nl80211_band new_band;
|
||||
enum nl80211_band new_band = current_band;
|
||||
int new_freq;
|
||||
u8 new_chan_no;
|
||||
struct ieee80211_channel *new_chan;
|
||||
|
@ -55,15 +56,13 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
|
|||
elems->ext_chansw_ie->new_operating_class,
|
||||
&new_band)) {
|
||||
sdata_info(sdata,
|
||||
"cannot understand ECSA IE operating class %d, disconnecting\n",
|
||||
"cannot understand ECSA IE operating class, %d, ignoring\n",
|
||||
elems->ext_chansw_ie->new_operating_class);
|
||||
return -EINVAL;
|
||||
}
|
||||
new_chan_no = elems->ext_chansw_ie->new_ch_num;
|
||||
csa_ie->count = elems->ext_chansw_ie->count;
|
||||
csa_ie->mode = elems->ext_chansw_ie->mode;
|
||||
} else if (elems->ch_switch_ie) {
|
||||
new_band = current_band;
|
||||
new_chan_no = elems->ch_switch_ie->new_ch_num;
|
||||
csa_ie->count = elems->ch_switch_ie->count;
|
||||
csa_ie->mode = elems->ch_switch_ie->mode;
|
||||
|
|
|
@ -314,7 +314,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
|
|||
|
||||
if (ieee80211_hw_check(hw, USES_RSS)) {
|
||||
sta->pcpu_rx_stats =
|
||||
alloc_percpu(struct ieee80211_sta_rx_stats);
|
||||
alloc_percpu_gfp(struct ieee80211_sta_rx_stats, gfp);
|
||||
if (!sta->pcpu_rx_stats)
|
||||
goto free;
|
||||
}
|
||||
|
@ -433,6 +433,7 @@ free_txq:
|
|||
if (sta->sta.txq[0])
|
||||
kfree(to_txq_info(sta->sta.txq[0]));
|
||||
free:
|
||||
free_percpu(sta->pcpu_rx_stats);
|
||||
#ifdef CONFIG_MAC80211_MESH
|
||||
kfree(sta->mesh);
|
||||
#endif
|
||||
|
|
|
@ -41,7 +41,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
|
|||
const struct nf_conn *ct,
|
||||
u16 *rover)
|
||||
{
|
||||
unsigned int range_size, min, i;
|
||||
unsigned int range_size, min, max, i;
|
||||
__be16 *portptr;
|
||||
u_int16_t off;
|
||||
|
||||
|
@ -71,7 +71,10 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
|
|||
}
|
||||
} else {
|
||||
min = ntohs(range->min_proto.all);
|
||||
range_size = ntohs(range->max_proto.all) - min + 1;
|
||||
max = ntohs(range->max_proto.all);
|
||||
if (unlikely(max < min))
|
||||
swap(max, min);
|
||||
range_size = max - min + 1;
|
||||
}
|
||||
|
||||
if (range->flags & NF_NAT_RANGE_PROTO_RANDOM) {
|
||||
|
|
|
@ -434,36 +434,35 @@ int xt_check_match(struct xt_mtchk_param *par,
|
|||
* ebt_among is exempt from centralized matchsize checking
|
||||
* because it uses a dynamic-size data set.
|
||||
*/
|
||||
pr_err("%s_tables: %s.%u match: invalid size "
|
||||
"%u (kernel) != (user) %u\n",
|
||||
xt_prefix[par->family], par->match->name,
|
||||
par->match->revision,
|
||||
XT_ALIGN(par->match->matchsize), size);
|
||||
pr_err_ratelimited("%s_tables: %s.%u match: invalid size %u (kernel) != (user) %u\n",
|
||||
xt_prefix[par->family], par->match->name,
|
||||
par->match->revision,
|
||||
XT_ALIGN(par->match->matchsize), size);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (par->match->table != NULL &&
|
||||
strcmp(par->match->table, par->table) != 0) {
|
||||
pr_err("%s_tables: %s match: only valid in %s table, not %s\n",
|
||||
xt_prefix[par->family], par->match->name,
|
||||
par->match->table, par->table);
|
||||
pr_info_ratelimited("%s_tables: %s match: only valid in %s table, not %s\n",
|
||||
xt_prefix[par->family], par->match->name,
|
||||
par->match->table, par->table);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (par->match->hooks && (par->hook_mask & ~par->match->hooks) != 0) {
|
||||
char used[64], allow[64];
|
||||
|
||||
pr_err("%s_tables: %s match: used from hooks %s, but only "
|
||||
"valid from %s\n",
|
||||
xt_prefix[par->family], par->match->name,
|
||||
textify_hooks(used, sizeof(used), par->hook_mask,
|
||||
par->family),
|
||||
textify_hooks(allow, sizeof(allow), par->match->hooks,
|
||||
par->family));
|
||||
pr_info_ratelimited("%s_tables: %s match: used from hooks %s, but only valid from %s\n",
|
||||
xt_prefix[par->family], par->match->name,
|
||||
textify_hooks(used, sizeof(used),
|
||||
par->hook_mask, par->family),
|
||||
textify_hooks(allow, sizeof(allow),
|
||||
par->match->hooks,
|
||||
par->family));
|
||||
return -EINVAL;
|
||||
}
|
||||
if (par->match->proto && (par->match->proto != proto || inv_proto)) {
|
||||
pr_err("%s_tables: %s match: only valid for protocol %u\n",
|
||||
xt_prefix[par->family], par->match->name,
|
||||
par->match->proto);
|
||||
pr_info_ratelimited("%s_tables: %s match: only valid for protocol %u\n",
|
||||
xt_prefix[par->family], par->match->name,
|
||||
par->match->proto);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (par->match->checkentry != NULL) {
|
||||
|
@ -814,36 +813,35 @@ int xt_check_target(struct xt_tgchk_param *par,
|
|||
int ret;
|
||||
|
||||
if (XT_ALIGN(par->target->targetsize) != size) {
|
||||
pr_err("%s_tables: %s.%u target: invalid size "
|
||||
"%u (kernel) != (user) %u\n",
|
||||
xt_prefix[par->family], par->target->name,
|
||||
par->target->revision,
|
||||
XT_ALIGN(par->target->targetsize), size);
|
||||
pr_err_ratelimited("%s_tables: %s.%u target: invalid size %u (kernel) != (user) %u\n",
|
||||
xt_prefix[par->family], par->target->name,
|
||||
par->target->revision,
|
||||
XT_ALIGN(par->target->targetsize), size);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (par->target->table != NULL &&
|
||||
strcmp(par->target->table, par->table) != 0) {
|
||||
pr_err("%s_tables: %s target: only valid in %s table, not %s\n",
|
||||
xt_prefix[par->family], par->target->name,
|
||||
par->target->table, par->table);
|
||||
pr_info_ratelimited("%s_tables: %s target: only valid in %s table, not %s\n",
|
||||
xt_prefix[par->family], par->target->name,
|
||||
par->target->table, par->table);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (par->target->hooks && (par->hook_mask & ~par->target->hooks) != 0) {
|
||||
char used[64], allow[64];
|
||||
|
||||
pr_err("%s_tables: %s target: used from hooks %s, but only "
|
||||
"usable from %s\n",
|
||||
xt_prefix[par->family], par->target->name,
|
||||
textify_hooks(used, sizeof(used), par->hook_mask,
|
||||
par->family),
|
||||
textify_hooks(allow, sizeof(allow), par->target->hooks,
|
||||
par->family));
|
||||
pr_info_ratelimited("%s_tables: %s target: used from hooks %s, but only usable from %s\n",
|
||||
xt_prefix[par->family], par->target->name,
|
||||
textify_hooks(used, sizeof(used),
|
||||
par->hook_mask, par->family),
|
||||
textify_hooks(allow, sizeof(allow),
|
||||
par->target->hooks,
|
||||
par->family));
|
||||
return -EINVAL;
|
||||
}
|
||||
if (par->target->proto && (par->target->proto != proto || inv_proto)) {
|
||||
pr_err("%s_tables: %s target: only valid for protocol %u\n",
|
||||
xt_prefix[par->family], par->target->name,
|
||||
par->target->proto);
|
||||
pr_info_ratelimited("%s_tables: %s target: only valid for protocol %u\n",
|
||||
xt_prefix[par->family], par->target->name,
|
||||
par->target->proto);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (par->target->checkentry != NULL) {
|
||||
|
@ -1004,10 +1002,6 @@ struct xt_table_info *xt_alloc_table_info(unsigned int size)
|
|||
if (sz < sizeof(*info))
|
||||
return NULL;
|
||||
|
||||
/* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
|
||||
if ((size >> PAGE_SHIFT) + 2 > totalram_pages)
|
||||
return NULL;
|
||||
|
||||
/* __GFP_NORETRY is not fully supported by kvmalloc but it should
|
||||
* work reasonably well if sz is too large and bail out rather
|
||||
* than shoot all processes down before realizing there is nothing
|
||||
|
|
|
@ -120,8 +120,8 @@ static int audit_tg_check(const struct xt_tgchk_param *par)
|
|||
const struct xt_audit_info *info = par->targinfo;
|
||||
|
||||
if (info->type > XT_AUDIT_TYPE_MAX) {
|
||||
pr_info("Audit type out of range (valid range: 0..%hhu)\n",
|
||||
XT_AUDIT_TYPE_MAX);
|
||||
pr_info_ratelimited("Audit type out of range (valid range: 0..%hhu)\n",
|
||||
XT_AUDIT_TYPE_MAX);
|
||||
return -ERANGE;
|
||||
}
|
||||
|
||||
|
|
|
@ -36,13 +36,13 @@ static int checksum_tg_check(const struct xt_tgchk_param *par)
|
|||
const struct xt_CHECKSUM_info *einfo = par->targinfo;
|
||||
|
||||
if (einfo->operation & ~XT_CHECKSUM_OP_FILL) {
|
||||
pr_info("unsupported CHECKSUM operation %x\n", einfo->operation);
|
||||
pr_info_ratelimited("unsupported CHECKSUM operation %x\n",
|
||||
einfo->operation);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (!einfo->operation) {
|
||||
pr_info("no CHECKSUM operation enabled\n");
|
||||
if (!einfo->operation)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -91,8 +91,8 @@ static int connsecmark_tg_check(const struct xt_tgchk_param *par)
|
|||
|
||||
if (strcmp(par->table, "mangle") != 0 &&
|
||||
strcmp(par->table, "security") != 0) {
|
||||
pr_info("target only valid in the \'mangle\' "
|
||||
"or \'security\' tables, not \'%s\'.\n", par->table);
|
||||
pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n",
|
||||
par->table);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -102,14 +102,14 @@ static int connsecmark_tg_check(const struct xt_tgchk_param *par)
|
|||
break;
|
||||
|
||||
default:
|
||||
pr_info("invalid mode: %hu\n", info->mode);
|
||||
pr_info_ratelimited("invalid mode: %hu\n", info->mode);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = nf_ct_netns_get(par->net, par->family);
|
||||
if (ret < 0)
|
||||
pr_info("cannot load conntrack support for proto=%u\n",
|
||||
par->family);
|
||||
pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
|
||||
par->family);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -82,15 +82,14 @@ xt_ct_set_helper(struct nf_conn *ct, const char *helper_name,
|
|||
|
||||
proto = xt_ct_find_proto(par);
|
||||
if (!proto) {
|
||||
pr_info("You must specify a L4 protocol, and not use "
|
||||
"inversions on it.\n");
|
||||
pr_info_ratelimited("You must specify a L4 protocol and not use inversions on it\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
helper = nf_conntrack_helper_try_module_get(helper_name, par->family,
|
||||
proto);
|
||||
if (helper == NULL) {
|
||||
pr_info("No such helper \"%s\"\n", helper_name);
|
||||
pr_info_ratelimited("No such helper \"%s\"\n", helper_name);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
|
@ -124,6 +123,7 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
|
|||
const struct nf_conntrack_l4proto *l4proto;
|
||||
struct ctnl_timeout *timeout;
|
||||
struct nf_conn_timeout *timeout_ext;
|
||||
const char *errmsg = NULL;
|
||||
int ret = 0;
|
||||
u8 proto;
|
||||
|
||||
|
@ -131,29 +131,29 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
|
|||
timeout_find_get = rcu_dereference(nf_ct_timeout_find_get_hook);
|
||||
if (timeout_find_get == NULL) {
|
||||
ret = -ENOENT;
|
||||
pr_info("Timeout policy base is empty\n");
|
||||
errmsg = "Timeout policy base is empty";
|
||||
goto out;
|
||||
}
|
||||
|
||||
proto = xt_ct_find_proto(par);
|
||||
if (!proto) {
|
||||
ret = -EINVAL;
|
||||
pr_info("You must specify a L4 protocol, and not use "
|
||||
"inversions on it.\n");
|
||||
errmsg = "You must specify a L4 protocol and not use inversions on it";
|
||||
goto out;
|
||||
}
|
||||
|
||||
timeout = timeout_find_get(par->net, timeout_name);
|
||||
if (timeout == NULL) {
|
||||
ret = -ENOENT;
|
||||
pr_info("No such timeout policy \"%s\"\n", timeout_name);
|
||||
pr_info_ratelimited("No such timeout policy \"%s\"\n",
|
||||
timeout_name);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (timeout->l3num != par->family) {
|
||||
ret = -EINVAL;
|
||||
pr_info("Timeout policy `%s' can only be used by L3 protocol "
|
||||
"number %d\n", timeout_name, timeout->l3num);
|
||||
pr_info_ratelimited("Timeout policy `%s' can only be used by L%d protocol number %d\n",
|
||||
timeout_name, 3, timeout->l3num);
|
||||
goto err_put_timeout;
|
||||
}
|
||||
/* Make sure the timeout policy matches any existing protocol tracker,
|
||||
|
@ -162,9 +162,8 @@ xt_ct_set_timeout(struct nf_conn *ct, const struct xt_tgchk_param *par,
|
|||
l4proto = __nf_ct_l4proto_find(par->family, proto);
|
||||
if (timeout->l4proto->l4proto != l4proto->l4proto) {
|
||||
ret = -EINVAL;
|
||||
pr_info("Timeout policy `%s' can only be used by L4 protocol "
|
||||
"number %d\n",
|
||||
timeout_name, timeout->l4proto->l4proto);
|
||||
pr_info_ratelimited("Timeout policy `%s' can only be used by L%d protocol number %d\n",
|
||||
timeout_name, 4, timeout->l4proto->l4proto);
|
||||
goto err_put_timeout;
|
||||
}
|
||||
timeout_ext = nf_ct_timeout_ext_add(ct, timeout, GFP_ATOMIC);
|
||||
|
@ -180,6 +179,8 @@ err_put_timeout:
|
|||
__xt_ct_tg_timeout_put(timeout);
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
if (errmsg)
|
||||
pr_info_ratelimited("%s\n", errmsg);
|
||||
return ret;
|
||||
#else
|
||||
return -EOPNOTSUPP;
|
||||
|
|
|
@ -66,10 +66,8 @@ static int dscp_tg_check(const struct xt_tgchk_param *par)
|
|||
{
|
||||
const struct xt_DSCP_info *info = par->targinfo;
|
||||
|
||||
if (info->dscp > XT_DSCP_MAX) {
|
||||
pr_info("dscp %x out of range\n", info->dscp);
|
||||
if (info->dscp > XT_DSCP_MAX)
|
||||
return -EDOM;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -105,10 +105,8 @@ static int ttl_tg_check(const struct xt_tgchk_param *par)
|
|||
{
|
||||
const struct ipt_TTL_info *info = par->targinfo;
|
||||
|
||||
if (info->mode > IPT_TTL_MAXMODE) {
|
||||
pr_info("TTL: invalid or unknown mode %u\n", info->mode);
|
||||
if (info->mode > IPT_TTL_MAXMODE)
|
||||
return -EINVAL;
|
||||
}
|
||||
if (info->mode != IPT_TTL_SET && info->ttl == 0)
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
|
@ -118,15 +116,10 @@ static int hl_tg6_check(const struct xt_tgchk_param *par)
|
|||
{
|
||||
const struct ip6t_HL_info *info = par->targinfo;
|
||||
|
||||
if (info->mode > IP6T_HL_MAXMODE) {
|
||||
pr_info("invalid or unknown mode %u\n", info->mode);
|
||||
if (info->mode > IP6T_HL_MAXMODE)
|
||||
return -EINVAL;
|
||||
}
|
||||
if (info->mode != IP6T_HL_SET && info->hop_limit == 0) {
|
||||
pr_info("increment/decrement does not "
|
||||
"make sense with value 0\n");
|
||||
if (info->mode != IP6T_HL_SET && info->hop_limit == 0)
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -9,6 +9,8 @@
|
|||
* the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/icmp.h>
|
||||
|
@ -312,29 +314,30 @@ hmark_tg_v4(struct sk_buff *skb, const struct xt_action_param *par)
|
|||
static int hmark_tg_check(const struct xt_tgchk_param *par)
|
||||
{
|
||||
const struct xt_hmark_info *info = par->targinfo;
|
||||
const char *errmsg = "proto mask must be zero with L3 mode";
|
||||
|
||||
if (!info->hmodulus) {
|
||||
pr_info("xt_HMARK: hash modulus can't be zero\n");
|
||||
if (!info->hmodulus)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (info->proto_mask &&
|
||||
(info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))) {
|
||||
pr_info("xt_HMARK: proto mask must be zero with L3 mode\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
(info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)))
|
||||
goto err;
|
||||
|
||||
if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI_MASK) &&
|
||||
(info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT_MASK) |
|
||||
XT_HMARK_FLAG(XT_HMARK_DPORT_MASK)))) {
|
||||
pr_info("xt_HMARK: spi-mask and port-mask can't be combined\n");
|
||||
XT_HMARK_FLAG(XT_HMARK_DPORT_MASK))))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (info->flags & XT_HMARK_FLAG(XT_HMARK_SPI) &&
|
||||
(info->flags & (XT_HMARK_FLAG(XT_HMARK_SPORT) |
|
||||
XT_HMARK_FLAG(XT_HMARK_DPORT)))) {
|
||||
pr_info("xt_HMARK: spi-set and port-set can't be combined\n");
|
||||
return -EINVAL;
|
||||
errmsg = "spi-set and port-set can't be combined";
|
||||
goto err;
|
||||
}
|
||||
return 0;
|
||||
err:
|
||||
pr_info_ratelimited("%s\n", errmsg);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static struct xt_target hmark_tg_reg[] __read_mostly = {
|
||||
|
|
|
@ -146,11 +146,11 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
|
|||
timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
|
||||
info->timer->refcnt = 1;
|
||||
|
||||
INIT_WORK(&info->timer->work, idletimer_tg_work);
|
||||
|
||||
mod_timer(&info->timer->timer,
|
||||
msecs_to_jiffies(info->timeout * 1000) + jiffies);
|
||||
|
||||
INIT_WORK(&info->timer->work, idletimer_tg_work);
|
||||
|
||||
return 0;
|
||||
|
||||
out_free_attr:
|
||||
|
@ -191,7 +191,10 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
|
|||
pr_debug("timeout value is zero\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (info->timeout >= INT_MAX / 1000) {
|
||||
pr_debug("timeout value is too big\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (info->label[0] == '\0' ||
|
||||
strnlen(info->label,
|
||||
MAX_IDLETIMER_LABEL_SIZE) == MAX_IDLETIMER_LABEL_SIZE) {
|
||||
|
|
|
@ -111,10 +111,8 @@ static int led_tg_check(const struct xt_tgchk_param *par)
|
|||
struct xt_led_info_internal *ledinternal;
|
||||
int err;
|
||||
|
||||
if (ledinfo->id[0] == '\0') {
|
||||
pr_info("No 'id' parameter given.\n");
|
||||
if (ledinfo->id[0] == '\0')
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
mutex_lock(&xt_led_mutex);
|
||||
|
||||
|
@ -138,13 +136,14 @@ static int led_tg_check(const struct xt_tgchk_param *par)
|
|||
|
||||
err = led_trigger_register(&ledinternal->netfilter_led_trigger);
|
||||
if (err) {
|
||||
pr_err("Trigger name is already in use.\n");
|
||||
pr_info_ratelimited("Trigger name is already in use.\n");
|
||||
goto exit_alloc;
|
||||
}
|
||||
|
||||
/* See if we need to set up a timer */
|
||||
if (ledinfo->delay > 0)
|
||||
timer_setup(&ledinternal->timer, led_timeout_callback, 0);
|
||||
/* Since the letinternal timer can be shared between multiple targets,
|
||||
* always set it up, even if the current target does not need it
|
||||
*/
|
||||
timer_setup(&ledinternal->timer, led_timeout_callback, 0);
|
||||
|
||||
list_add_tail(&ledinternal->list, &xt_led_triggers);
|
||||
|
||||
|
@ -181,8 +180,7 @@ static void led_tg_destroy(const struct xt_tgdtor_param *par)
|
|||
|
||||
list_del(&ledinternal->list);
|
||||
|
||||
if (ledinfo->delay > 0)
|
||||
del_timer_sync(&ledinternal->timer);
|
||||
del_timer_sync(&ledinternal->timer);
|
||||
|
||||
led_trigger_unregister(&ledinternal->netfilter_led_trigger);
|
||||
|
||||
|
|
|
@ -8,6 +8,8 @@
|
|||
*
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/skbuff.h>
|
||||
|
||||
|
@ -67,13 +69,13 @@ static int nfqueue_tg_check(const struct xt_tgchk_param *par)
|
|||
init_hashrandom(&jhash_initval);
|
||||
|
||||
if (info->queues_total == 0) {
|
||||
pr_err("NFQUEUE: number of total queues is 0\n");
|
||||
pr_info_ratelimited("number of total queues is 0\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
maxid = info->queues_total - 1 + info->queuenum;
|
||||
if (maxid > 0xffff) {
|
||||
pr_err("NFQUEUE: number of queues (%u) out of range (got %u)\n",
|
||||
info->queues_total, maxid);
|
||||
pr_info_ratelimited("number of queues (%u) out of range (got %u)\n",
|
||||
info->queues_total, maxid);
|
||||
return -ERANGE;
|
||||
}
|
||||
if (par->target->revision == 2 && info->flags > 1)
|
||||
|
|
|
@ -60,18 +60,20 @@ static int checkentry_lsm(struct xt_secmark_target_info *info)
|
|||
&info->secid);
|
||||
if (err) {
|
||||
if (err == -EINVAL)
|
||||
pr_info("invalid security context \'%s\'\n", info->secctx);
|
||||
pr_info_ratelimited("invalid security context \'%s\'\n",
|
||||
info->secctx);
|
||||
return err;
|
||||
}
|
||||
|
||||
if (!info->secid) {
|
||||
pr_info("unable to map security context \'%s\'\n", info->secctx);
|
||||
pr_info_ratelimited("unable to map security context \'%s\'\n",
|
||||
info->secctx);
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
err = security_secmark_relabel_packet(info->secid);
|
||||
if (err) {
|
||||
pr_info("unable to obtain relabeling permission\n");
|
||||
pr_info_ratelimited("unable to obtain relabeling permission\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
|
@ -86,14 +88,14 @@ static int secmark_tg_check(const struct xt_tgchk_param *par)
|
|||
|
||||
if (strcmp(par->table, "mangle") != 0 &&
|
||||
strcmp(par->table, "security") != 0) {
|
||||
pr_info("target only valid in the \'mangle\' "
|
||||
"or \'security\' tables, not \'%s\'.\n", par->table);
|
||||
pr_info_ratelimited("only valid in \'mangle\' or \'security\' table, not \'%s\'\n",
|
||||
par->table);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (mode && mode != info->mode) {
|
||||
pr_info("mode already set to %hu cannot mix with "
|
||||
"rules for mode %hu\n", mode, info->mode);
|
||||
pr_info_ratelimited("mode already set to %hu cannot mix with rules for mode %hu\n",
|
||||
mode, info->mode);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -101,7 +103,7 @@ static int secmark_tg_check(const struct xt_tgchk_param *par)
|
|||
case SECMARK_MODE_SEL:
|
||||
break;
|
||||
default:
|
||||
pr_info("invalid mode: %hu\n", info->mode);
|
||||
pr_info_ratelimited("invalid mode: %hu\n", info->mode);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
|
|
@ -273,8 +273,7 @@ static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
|
|||
(par->hook_mask & ~((1 << NF_INET_FORWARD) |
|
||||
(1 << NF_INET_LOCAL_OUT) |
|
||||
(1 << NF_INET_POST_ROUTING))) != 0) {
|
||||
pr_info("path-MTU clamping only supported in "
|
||||
"FORWARD, OUTPUT and POSTROUTING hooks\n");
|
||||
pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (par->nft_compat)
|
||||
|
@ -283,7 +282,7 @@ static int tcpmss_tg4_check(const struct xt_tgchk_param *par)
|
|||
xt_ematch_foreach(ematch, e)
|
||||
if (find_syn_match(ematch))
|
||||
return 0;
|
||||
pr_info("Only works on TCP SYN packets\n");
|
||||
pr_info_ratelimited("Only works on TCP SYN packets\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -298,8 +297,7 @@ static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
|
|||
(par->hook_mask & ~((1 << NF_INET_FORWARD) |
|
||||
(1 << NF_INET_LOCAL_OUT) |
|
||||
(1 << NF_INET_POST_ROUTING))) != 0) {
|
||||
pr_info("path-MTU clamping only supported in "
|
||||
"FORWARD, OUTPUT and POSTROUTING hooks\n");
|
||||
pr_info_ratelimited("path-MTU clamping only supported in FORWARD, OUTPUT and POSTROUTING hooks\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (par->nft_compat)
|
||||
|
@ -308,7 +306,7 @@ static int tcpmss_tg6_check(const struct xt_tgchk_param *par)
|
|||
xt_ematch_foreach(ematch, e)
|
||||
if (find_syn_match(ematch))
|
||||
return 0;
|
||||
pr_info("Only works on TCP SYN packets\n");
|
||||
pr_info_ratelimited("Only works on TCP SYN packets\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -540,8 +540,7 @@ static int tproxy_tg6_check(const struct xt_tgchk_param *par)
|
|||
!(i->invflags & IP6T_INV_PROTO))
|
||||
return 0;
|
||||
|
||||
pr_info("Can be used only in combination with "
|
||||
"either -p tcp or -p udp\n");
|
||||
pr_info_ratelimited("Can be used only with -p tcp or -p udp\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
#endif
|
||||
|
@ -559,8 +558,7 @@ static int tproxy_tg4_check(const struct xt_tgchk_param *par)
|
|||
&& !(i->invflags & IPT_INV_PROTO))
|
||||
return 0;
|
||||
|
||||
pr_info("Can be used only in combination with "
|
||||
"either -p tcp or -p udp\n");
|
||||
pr_info_ratelimited("Can be used only with -p tcp or -p udp\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
|
|
@ -164,48 +164,47 @@ addrtype_mt_v1(const struct sk_buff *skb, struct xt_action_param *par)
|
|||
|
||||
static int addrtype_mt_checkentry_v1(const struct xt_mtchk_param *par)
|
||||
{
|
||||
const char *errmsg = "both incoming and outgoing interface limitation cannot be selected";
|
||||
struct xt_addrtype_info_v1 *info = par->matchinfo;
|
||||
|
||||
if (info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN &&
|
||||
info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) {
|
||||
pr_info("both incoming and outgoing "
|
||||
"interface limitation cannot be selected\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT)
|
||||
goto err;
|
||||
|
||||
if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) |
|
||||
(1 << NF_INET_LOCAL_IN)) &&
|
||||
info->flags & XT_ADDRTYPE_LIMIT_IFACE_OUT) {
|
||||
pr_info("output interface limitation "
|
||||
"not valid in PREROUTING and INPUT\n");
|
||||
return -EINVAL;
|
||||
errmsg = "output interface limitation not valid in PREROUTING and INPUT";
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) |
|
||||
(1 << NF_INET_LOCAL_OUT)) &&
|
||||
info->flags & XT_ADDRTYPE_LIMIT_IFACE_IN) {
|
||||
pr_info("input interface limitation "
|
||||
"not valid in POSTROUTING and OUTPUT\n");
|
||||
return -EINVAL;
|
||||
errmsg = "input interface limitation not valid in POSTROUTING and OUTPUT";
|
||||
goto err;
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
|
||||
if (par->family == NFPROTO_IPV6) {
|
||||
if ((info->source | info->dest) & XT_ADDRTYPE_BLACKHOLE) {
|
||||
pr_err("ipv6 BLACKHOLE matching not supported\n");
|
||||
return -EINVAL;
|
||||
errmsg = "ipv6 BLACKHOLE matching not supported";
|
||||
goto err;
|
||||
}
|
||||
if ((info->source | info->dest) >= XT_ADDRTYPE_PROHIBIT) {
|
||||
pr_err("ipv6 PROHIBIT (THROW, NAT ..) matching not supported\n");
|
||||
return -EINVAL;
|
||||
errmsg = "ipv6 PROHIBIT (THROW, NAT ..) matching not supported";
|
||||
goto err;
|
||||
}
|
||||
if ((info->source | info->dest) & XT_ADDRTYPE_BROADCAST) {
|
||||
pr_err("ipv6 does not support BROADCAST matching\n");
|
||||
return -EINVAL;
|
||||
errmsg = "ipv6 does not support BROADCAST matching";
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
return 0;
|
||||
err:
|
||||
pr_info_ratelimited("%s\n", errmsg);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static struct xt_match addrtype_mt_reg[] __read_mostly = {
|
||||
|
|
|
@ -7,6 +7,8 @@
|
|||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/skbuff.h>
|
||||
|
@ -34,7 +36,7 @@ static int __bpf_mt_check_bytecode(struct sock_filter *insns, __u16 len,
|
|||
program.filter = insns;
|
||||
|
||||
if (bpf_prog_create(ret, &program)) {
|
||||
pr_info("bpf: check failed: parse error\n");
|
||||
pr_info_ratelimited("check failed: parse error\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
|
|
@ -12,6 +12,8 @@
|
|||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/netfilter/x_tables.h>
|
||||
|
@ -48,7 +50,7 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par)
|
|||
}
|
||||
|
||||
if (info->has_path && info->has_classid) {
|
||||
pr_info("xt_cgroup: both path and classid specified\n");
|
||||
pr_info_ratelimited("path and classid specified\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -56,8 +58,8 @@ static int cgroup_mt_check_v1(const struct xt_mtchk_param *par)
|
|||
if (info->has_path) {
|
||||
cgrp = cgroup_get_from_path(info->path);
|
||||
if (IS_ERR(cgrp)) {
|
||||
pr_info("xt_cgroup: invalid path, errno=%ld\n",
|
||||
PTR_ERR(cgrp));
|
||||
pr_info_ratelimited("invalid path, errno=%ld\n",
|
||||
PTR_ERR(cgrp));
|
||||
return -EINVAL;
|
||||
}
|
||||
info->priv = cgrp;
|
||||
|
|
|
@ -135,14 +135,12 @@ static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
|
|||
struct xt_cluster_match_info *info = par->matchinfo;
|
||||
|
||||
if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
|
||||
pr_info("you have exceeded the maximum "
|
||||
"number of cluster nodes (%u > %u)\n",
|
||||
info->total_nodes, XT_CLUSTER_NODES_MAX);
|
||||
pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n",
|
||||
info->total_nodes, XT_CLUSTER_NODES_MAX);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (info->node_mask >= (1ULL << info->total_nodes)) {
|
||||
pr_info("this node mask cannot be "
|
||||
"higher than the total number of nodes\n");
|
||||
pr_info_ratelimited("node mask cannot exceed total number of nodes\n");
|
||||
return -EDOM;
|
||||
}
|
||||
return 0;
|
||||
|
|
|
@ -112,8 +112,8 @@ static int connbytes_mt_check(const struct xt_mtchk_param *par)
|
|||
|
||||
ret = nf_ct_netns_get(par->net, par->family);
|
||||
if (ret < 0)
|
||||
pr_info("cannot load conntrack support for proto=%u\n",
|
||||
par->family);
|
||||
pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
|
||||
par->family);
|
||||
|
||||
/*
|
||||
* This filter cannot function correctly unless connection tracking
|
||||
|
|
|
@ -57,14 +57,15 @@ static int connlabel_mt_check(const struct xt_mtchk_param *par)
|
|||
int ret;
|
||||
|
||||
if (info->options & ~options) {
|
||||
pr_err("Unknown options in mask %x\n", info->options);
|
||||
pr_info_ratelimited("Unknown options in mask %x\n",
|
||||
info->options);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
ret = nf_ct_netns_get(par->net, par->family);
|
||||
if (ret < 0) {
|
||||
pr_info("cannot load conntrack support for proto=%u\n",
|
||||
par->family);
|
||||
pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
|
||||
par->family);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -79,8 +79,8 @@ static int connmark_tg_check(const struct xt_tgchk_param *par)
|
|||
|
||||
ret = nf_ct_netns_get(par->net, par->family);
|
||||
if (ret < 0)
|
||||
pr_info("cannot load conntrack support for proto=%u\n",
|
||||
par->family);
|
||||
pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
|
||||
par->family);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -109,8 +109,8 @@ static int connmark_mt_check(const struct xt_mtchk_param *par)
|
|||
|
||||
ret = nf_ct_netns_get(par->net, par->family);
|
||||
if (ret < 0)
|
||||
pr_info("cannot load conntrack support for proto=%u\n",
|
||||
par->family);
|
||||
pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
|
||||
par->family);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -272,8 +272,8 @@ static int conntrack_mt_check(const struct xt_mtchk_param *par)
|
|||
|
||||
ret = nf_ct_netns_get(par->net, par->family);
|
||||
if (ret < 0)
|
||||
pr_info("cannot load conntrack support for proto=%u\n",
|
||||
par->family);
|
||||
pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
|
||||
par->family);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -46,10 +46,8 @@ static int dscp_mt_check(const struct xt_mtchk_param *par)
|
|||
{
|
||||
const struct xt_dscp_info *info = par->matchinfo;
|
||||
|
||||
if (info->dscp > XT_DSCP_MAX) {
|
||||
pr_info("dscp %x out of range\n", info->dscp);
|
||||
if (info->dscp > XT_DSCP_MAX)
|
||||
return -EDOM;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -97,7 +97,7 @@ static int ecn_mt_check4(const struct xt_mtchk_param *par)
|
|||
|
||||
if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
|
||||
(ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) {
|
||||
pr_info("cannot match TCP bits in rule for non-tcp packets\n");
|
||||
pr_info_ratelimited("cannot match TCP bits for non-tcp packets\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -139,7 +139,7 @@ static int ecn_mt_check6(const struct xt_mtchk_param *par)
|
|||
|
||||
if (info->operation & (XT_ECN_OP_MATCH_ECE | XT_ECN_OP_MATCH_CWR) &&
|
||||
(ip->proto != IPPROTO_TCP || ip->invflags & IP6T_INV_PROTO)) {
|
||||
pr_info("cannot match TCP bits in rule for non-tcp packets\n");
|
||||
pr_info_ratelimited("cannot match TCP bits for non-tcp packets\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
|
|
@ -523,7 +523,8 @@ static u64 user2rate(u64 user)
|
|||
if (user != 0) {
|
||||
return div64_u64(XT_HASHLIMIT_SCALE_v2, user);
|
||||
} else {
|
||||
pr_warn("invalid rate from userspace: %llu\n", user);
|
||||
pr_info_ratelimited("invalid rate from userspace: %llu\n",
|
||||
user);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
@ -774,7 +775,7 @@ hashlimit_mt_common(const struct sk_buff *skb, struct xt_action_param *par,
|
|||
if (!dh->rateinfo.prev_window &&
|
||||
(dh->rateinfo.current_rate <= dh->rateinfo.burst)) {
|
||||
spin_unlock(&dh->lock);
|
||||
rcu_read_unlock_bh();
|
||||
local_bh_enable();
|
||||
return !(cfg->mode & XT_HASHLIMIT_INVERT);
|
||||
} else {
|
||||
goto overlimit;
|
||||
|
@ -865,33 +866,34 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
|
|||
}
|
||||
|
||||
if (cfg->mode & ~XT_HASHLIMIT_ALL) {
|
||||
pr_info("Unknown mode mask %X, kernel too old?\n",
|
||||
cfg->mode);
|
||||
pr_info_ratelimited("Unknown mode mask %X, kernel too old?\n",
|
||||
cfg->mode);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Check for overflow. */
|
||||
if (revision >= 3 && cfg->mode & XT_HASHLIMIT_RATE_MATCH) {
|
||||
if (cfg->avg == 0 || cfg->avg > U32_MAX) {
|
||||
pr_info("hashlimit invalid rate\n");
|
||||
pr_info_ratelimited("invalid rate\n");
|
||||
return -ERANGE;
|
||||
}
|
||||
|
||||
if (cfg->interval == 0) {
|
||||
pr_info("hashlimit invalid interval\n");
|
||||
pr_info_ratelimited("invalid interval\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
} else if (cfg->mode & XT_HASHLIMIT_BYTES) {
|
||||
if (user2credits_byte(cfg->avg) == 0) {
|
||||
pr_info("overflow, rate too high: %llu\n", cfg->avg);
|
||||
pr_info_ratelimited("overflow, rate too high: %llu\n",
|
||||
cfg->avg);
|
||||
return -EINVAL;
|
||||
}
|
||||
} else if (cfg->burst == 0 ||
|
||||
user2credits(cfg->avg * cfg->burst, revision) <
|
||||
user2credits(cfg->avg, revision)) {
|
||||
pr_info("overflow, try lower: %llu/%llu\n",
|
||||
cfg->avg, cfg->burst);
|
||||
return -ERANGE;
|
||||
user2credits(cfg->avg * cfg->burst, revision) <
|
||||
user2credits(cfg->avg, revision)) {
|
||||
pr_info_ratelimited("overflow, try lower: %llu/%llu\n",
|
||||
cfg->avg, cfg->burst);
|
||||
return -ERANGE;
|
||||
}
|
||||
|
||||
mutex_lock(&hashlimit_mutex);
|
||||
|
|
|
@ -61,8 +61,8 @@ static int helper_mt_check(const struct xt_mtchk_param *par)
|
|||
|
||||
ret = nf_ct_netns_get(par->net, par->family);
|
||||
if (ret < 0) {
|
||||
pr_info("cannot load conntrack support for proto=%u\n",
|
||||
par->family);
|
||||
pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
|
||||
par->family);
|
||||
return ret;
|
||||
}
|
||||
info->name[sizeof(info->name) - 1] = '\0';
|
||||
|
|
|
@ -72,7 +72,7 @@ static int comp_mt_check(const struct xt_mtchk_param *par)
|
|||
|
||||
/* Must specify no unknown invflags */
|
||||
if (compinfo->invflags & ~XT_IPCOMP_INV_MASK) {
|
||||
pr_err("unknown flags %X\n", compinfo->invflags);
|
||||
pr_info_ratelimited("unknown flags %X\n", compinfo->invflags);
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
|
|
|
@ -158,7 +158,8 @@ static int ipvs_mt_check(const struct xt_mtchk_param *par)
|
|||
&& par->family != NFPROTO_IPV6
|
||||
#endif
|
||||
) {
|
||||
pr_info("protocol family %u not supported\n", par->family);
|
||||
pr_info_ratelimited("protocol family %u not supported\n",
|
||||
par->family);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
|
|
@ -216,7 +216,7 @@ static int l2tp_mt_check(const struct xt_mtchk_param *par)
|
|||
/* Check for invalid flags */
|
||||
if (info->flags & ~(XT_L2TP_TID | XT_L2TP_SID | XT_L2TP_VERSION |
|
||||
XT_L2TP_TYPE)) {
|
||||
pr_info("unknown flags: %x\n", info->flags);
|
||||
pr_info_ratelimited("unknown flags: %x\n", info->flags);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -225,7 +225,8 @@ static int l2tp_mt_check(const struct xt_mtchk_param *par)
|
|||
(!(info->flags & XT_L2TP_SID)) &&
|
||||
((!(info->flags & XT_L2TP_TYPE)) ||
|
||||
(info->type != XT_L2TP_TYPE_CONTROL))) {
|
||||
pr_info("invalid flags combination: %x\n", info->flags);
|
||||
pr_info_ratelimited("invalid flags combination: %x\n",
|
||||
info->flags);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -234,19 +235,22 @@ static int l2tp_mt_check(const struct xt_mtchk_param *par)
|
|||
*/
|
||||
if (info->flags & XT_L2TP_VERSION) {
|
||||
if ((info->version < 2) || (info->version > 3)) {
|
||||
pr_info("wrong L2TP version: %u\n", info->version);
|
||||
pr_info_ratelimited("wrong L2TP version: %u\n",
|
||||
info->version);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (info->version == 2) {
|
||||
if ((info->flags & XT_L2TP_TID) &&
|
||||
(info->tid > 0xffff)) {
|
||||
pr_info("v2 tid > 0xffff: %u\n", info->tid);
|
||||
pr_info_ratelimited("v2 tid > 0xffff: %u\n",
|
||||
info->tid);
|
||||
return -EINVAL;
|
||||
}
|
||||
if ((info->flags & XT_L2TP_SID) &&
|
||||
(info->sid > 0xffff)) {
|
||||
pr_info("v2 sid > 0xffff: %u\n", info->sid);
|
||||
pr_info_ratelimited("v2 sid > 0xffff: %u\n",
|
||||
info->sid);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
@ -268,13 +272,13 @@ static int l2tp_mt_check4(const struct xt_mtchk_param *par)
|
|||
|
||||
if ((ip->proto != IPPROTO_UDP) &&
|
||||
(ip->proto != IPPROTO_L2TP)) {
|
||||
pr_info("missing protocol rule (udp|l2tpip)\n");
|
||||
pr_info_ratelimited("missing protocol rule (udp|l2tpip)\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if ((ip->proto == IPPROTO_L2TP) &&
|
||||
(info->version == 2)) {
|
||||
pr_info("v2 doesn't support IP mode\n");
|
||||
pr_info_ratelimited("v2 doesn't support IP mode\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -295,13 +299,13 @@ static int l2tp_mt_check6(const struct xt_mtchk_param *par)
|
|||
|
||||
if ((ip->proto != IPPROTO_UDP) &&
|
||||
(ip->proto != IPPROTO_L2TP)) {
|
||||
pr_info("missing protocol rule (udp|l2tpip)\n");
|
||||
pr_info_ratelimited("missing protocol rule (udp|l2tpip)\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if ((ip->proto == IPPROTO_L2TP) &&
|
||||
(info->version == 2)) {
|
||||
pr_info("v2 doesn't support IP mode\n");
|
||||
pr_info_ratelimited("v2 doesn't support IP mode\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
|
|
@ -106,8 +106,8 @@ static int limit_mt_check(const struct xt_mtchk_param *par)
|
|||
/* Check for overflow. */
|
||||
if (r->burst == 0
|
||||
|| user2credits(r->avg * r->burst) < user2credits(r->avg)) {
|
||||
pr_info("Overflow, try lower: %u/%u\n",
|
||||
r->avg, r->burst);
|
||||
pr_info_ratelimited("Overflow, try lower: %u/%u\n",
|
||||
r->avg, r->burst);
|
||||
return -ERANGE;
|
||||
}
|
||||
|
||||
|
|
|
@ -8,6 +8,8 @@
|
|||
* published by the Free Software Foundation.
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/skbuff.h>
|
||||
#include <linux/netfilter.h>
|
||||
|
@ -19,8 +21,7 @@ static int xt_nat_checkentry_v0(const struct xt_tgchk_param *par)
|
|||
const struct nf_nat_ipv4_multi_range_compat *mr = par->targinfo;
|
||||
|
||||
if (mr->rangesize != 1) {
|
||||
pr_info("%s: multiple ranges no longer supported\n",
|
||||
par->target->name);
|
||||
pr_info_ratelimited("multiple ranges no longer supported\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
return nf_ct_netns_get(par->net, par->family);
|
||||
|
|
|
@ -6,6 +6,8 @@
|
|||
* it under the terms of the GNU General Public License version 2 (or any
|
||||
* later at your option) as published by the Free Software Foundation.
|
||||
*/
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/skbuff.h>
|
||||
|
||||
|
@ -39,8 +41,8 @@ nfacct_mt_checkentry(const struct xt_mtchk_param *par)
|
|||
|
||||
nfacct = nfnl_acct_find_get(par->net, info->name);
|
||||
if (nfacct == NULL) {
|
||||
pr_info("xt_nfacct: accounting object with name `%s' "
|
||||
"does not exists\n", info->name);
|
||||
pr_info_ratelimited("accounting object `%s' does not exists\n",
|
||||
info->name);
|
||||
return -ENOENT;
|
||||
}
|
||||
info->nfacct = nfacct;
|
||||
|
|
|
@ -107,9 +107,7 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
|
|||
info->invert & XT_PHYSDEV_OP_BRIDGED) &&
|
||||
par->hook_mask & ((1 << NF_INET_LOCAL_OUT) |
|
||||
(1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) {
|
||||
pr_info("using --physdev-out and --physdev-is-out are only "
|
||||
"supported in the FORWARD and POSTROUTING chains with "
|
||||
"bridged traffic.\n");
|
||||
pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n");
|
||||
if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
|
|
@ -132,26 +132,29 @@ policy_mt(const struct sk_buff *skb, struct xt_action_param *par)
|
|||
static int policy_mt_check(const struct xt_mtchk_param *par)
|
||||
{
|
||||
const struct xt_policy_info *info = par->matchinfo;
|
||||
const char *errmsg = "neither incoming nor outgoing policy selected";
|
||||
|
||||
if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT)))
|
||||
goto err;
|
||||
|
||||
if (!(info->flags & (XT_POLICY_MATCH_IN|XT_POLICY_MATCH_OUT))) {
|
||||
pr_info("neither incoming nor outgoing policy selected\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (par->hook_mask & ((1 << NF_INET_PRE_ROUTING) |
|
||||
(1 << NF_INET_LOCAL_IN)) && info->flags & XT_POLICY_MATCH_OUT) {
|
||||
pr_info("output policy not valid in PREROUTING and INPUT\n");
|
||||
return -EINVAL;
|
||||
errmsg = "output policy not valid in PREROUTING and INPUT";
|
||||
goto err;
|
||||
}
|
||||
if (par->hook_mask & ((1 << NF_INET_POST_ROUTING) |
|
||||
(1 << NF_INET_LOCAL_OUT)) && info->flags & XT_POLICY_MATCH_IN) {
|
||||
pr_info("input policy not valid in POSTROUTING and OUTPUT\n");
|
||||
return -EINVAL;
|
||||
errmsg = "input policy not valid in POSTROUTING and OUTPUT";
|
||||
goto err;
|
||||
}
|
||||
if (info->len > XT_POLICY_MAX_ELEM) {
|
||||
pr_info("too many policy elements\n");
|
||||
return -EINVAL;
|
||||
errmsg = "too many policy elements";
|
||||
goto err;
|
||||
}
|
||||
return 0;
|
||||
err:
|
||||
pr_info_ratelimited("%s\n", errmsg);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static struct xt_match policy_mt_reg[] __read_mostly = {
|
||||
|
|
|
@ -342,8 +342,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
|
|||
net_get_random_once(&hash_rnd, sizeof(hash_rnd));
|
||||
|
||||
if (info->check_set & ~XT_RECENT_VALID_FLAGS) {
|
||||
pr_info("Unsupported user space flags (%08x)\n",
|
||||
info->check_set);
|
||||
pr_info_ratelimited("Unsupported userspace flags (%08x)\n",
|
||||
info->check_set);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (hweight8(info->check_set &
|
||||
|
@ -357,8 +357,8 @@ static int recent_mt_check(const struct xt_mtchk_param *par,
|
|||
if ((info->check_set & XT_RECENT_REAP) && !info->seconds)
|
||||
return -EINVAL;
|
||||
if (info->hit_count >= XT_RECENT_MAX_NSTAMPS) {
|
||||
pr_info("hitcount (%u) is larger than allowed maximum (%u)\n",
|
||||
info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
|
||||
pr_info_ratelimited("hitcount (%u) is larger than allowed maximum (%u)\n",
|
||||
info->hit_count, XT_RECENT_MAX_NSTAMPS - 1);
|
||||
return -EINVAL;
|
||||
}
|
||||
if (info->name[0] == '\0' ||
|
||||
|
@ -587,7 +587,7 @@ recent_mt_proc_write(struct file *file, const char __user *input,
|
|||
add = true;
|
||||
break;
|
||||
default:
|
||||
pr_info("Need \"+ip\", \"-ip\" or \"/\"\n");
|
||||
pr_info_ratelimited("Need \"+ip\", \"-ip\" or \"/\"\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -601,10 +601,8 @@ recent_mt_proc_write(struct file *file, const char __user *input,
|
|||
succ = in4_pton(c, size, (void *)&addr, '\n', NULL);
|
||||
}
|
||||
|
||||
if (!succ) {
|
||||
pr_info("illegal address written to procfs\n");
|
||||
if (!succ)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
spin_lock_bh(&recent_lock);
|
||||
e = recent_entry_lookup(t, &addr, family, 0);
|
||||
|
|
|
@ -92,12 +92,12 @@ set_match_v0_checkentry(const struct xt_mtchk_param *par)
|
|||
index = ip_set_nfnl_get_byindex(par->net, info->match_set.index);
|
||||
|
||||
if (index == IPSET_INVALID_ID) {
|
||||
pr_warn("Cannot find set identified by id %u to match\n",
|
||||
info->match_set.index);
|
||||
pr_info_ratelimited("Cannot find set identified by id %u to match\n",
|
||||
info->match_set.index);
|
||||
return -ENOENT;
|
||||
}
|
||||
if (info->match_set.u.flags[IPSET_DIM_MAX - 1] != 0) {
|
||||
pr_warn("Protocol error: set match dimension is over the limit!\n");
|
||||
pr_info_ratelimited("set match dimension is over the limit!\n");
|
||||
ip_set_nfnl_put(par->net, info->match_set.index);
|
||||
return -ERANGE;
|
||||
}
|
||||
|
@ -143,12 +143,12 @@ set_match_v1_checkentry(const struct xt_mtchk_param *par)
|
|||
index = ip_set_nfnl_get_byindex(par->net, info->match_set.index);
|
||||
|
||||
if (index == IPSET_INVALID_ID) {
|
||||
pr_warn("Cannot find set identified by id %u to match\n",
|
||||
info->match_set.index);
|
||||
pr_info_ratelimited("Cannot find set identified by id %u to match\n",
|
||||
info->match_set.index);
|
||||
return -ENOENT;
|
||||
}
|
||||
if (info->match_set.dim > IPSET_DIM_MAX) {
|
||||
pr_warn("Protocol error: set match dimension is over the limit!\n");
|
||||
pr_info_ratelimited("set match dimension is over the limit!\n");
|
||||
ip_set_nfnl_put(par->net, info->match_set.index);
|
||||
return -ERANGE;
|
||||
}
|
||||
|
@ -241,8 +241,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
|
|||
if (info->add_set.index != IPSET_INVALID_ID) {
|
||||
index = ip_set_nfnl_get_byindex(par->net, info->add_set.index);
|
||||
if (index == IPSET_INVALID_ID) {
|
||||
pr_warn("Cannot find add_set index %u as target\n",
|
||||
info->add_set.index);
|
||||
pr_info_ratelimited("Cannot find add_set index %u as target\n",
|
||||
info->add_set.index);
|
||||
return -ENOENT;
|
||||
}
|
||||
}
|
||||
|
@ -250,8 +250,8 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
|
|||
if (info->del_set.index != IPSET_INVALID_ID) {
|
||||
index = ip_set_nfnl_get_byindex(par->net, info->del_set.index);
|
||||
if (index == IPSET_INVALID_ID) {
|
||||
pr_warn("Cannot find del_set index %u as target\n",
|
||||
info->del_set.index);
|
||||
pr_info_ratelimited("Cannot find del_set index %u as target\n",
|
||||
info->del_set.index);
|
||||
if (info->add_set.index != IPSET_INVALID_ID)
|
||||
ip_set_nfnl_put(par->net, info->add_set.index);
|
||||
return -ENOENT;
|
||||
|
@ -259,7 +259,7 @@ set_target_v0_checkentry(const struct xt_tgchk_param *par)
|
|||
}
|
||||
if (info->add_set.u.flags[IPSET_DIM_MAX - 1] != 0 ||
|
||||
info->del_set.u.flags[IPSET_DIM_MAX - 1] != 0) {
|
||||
pr_warn("Protocol error: SET target dimension is over the limit!\n");
|
||||
pr_info_ratelimited("SET target dimension over the limit!\n");
|
||||
if (info->add_set.index != IPSET_INVALID_ID)
|
||||
ip_set_nfnl_put(par->net, info->add_set.index);
|
||||
if (info->del_set.index != IPSET_INVALID_ID)
|
||||
|
@ -316,8 +316,8 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
|
|||
if (info->add_set.index != IPSET_INVALID_ID) {
|
||||
index = ip_set_nfnl_get_byindex(par->net, info->add_set.index);
|
||||
if (index == IPSET_INVALID_ID) {
|
||||
pr_warn("Cannot find add_set index %u as target\n",
|
||||
info->add_set.index);
|
||||
pr_info_ratelimited("Cannot find add_set index %u as target\n",
|
||||
info->add_set.index);
|
||||
return -ENOENT;
|
||||
}
|
||||
}
|
||||
|
@ -325,8 +325,8 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
|
|||
if (info->del_set.index != IPSET_INVALID_ID) {
|
||||
index = ip_set_nfnl_get_byindex(par->net, info->del_set.index);
|
||||
if (index == IPSET_INVALID_ID) {
|
||||
pr_warn("Cannot find del_set index %u as target\n",
|
||||
info->del_set.index);
|
||||
pr_info_ratelimited("Cannot find del_set index %u as target\n",
|
||||
info->del_set.index);
|
||||
if (info->add_set.index != IPSET_INVALID_ID)
|
||||
ip_set_nfnl_put(par->net, info->add_set.index);
|
||||
return -ENOENT;
|
||||
|
@ -334,7 +334,7 @@ set_target_v1_checkentry(const struct xt_tgchk_param *par)
|
|||
}
|
||||
if (info->add_set.dim > IPSET_DIM_MAX ||
|
||||
info->del_set.dim > IPSET_DIM_MAX) {
|
||||
pr_warn("Protocol error: SET target dimension is over the limit!\n");
|
||||
pr_info_ratelimited("SET target dimension over the limit!\n");
|
||||
if (info->add_set.index != IPSET_INVALID_ID)
|
||||
ip_set_nfnl_put(par->net, info->add_set.index);
|
||||
if (info->del_set.index != IPSET_INVALID_ID)
|
||||
|
@ -444,8 +444,8 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
|
|||
index = ip_set_nfnl_get_byindex(par->net,
|
||||
info->add_set.index);
|
||||
if (index == IPSET_INVALID_ID) {
|
||||
pr_warn("Cannot find add_set index %u as target\n",
|
||||
info->add_set.index);
|
||||
pr_info_ratelimited("Cannot find add_set index %u as target\n",
|
||||
info->add_set.index);
|
||||
return -ENOENT;
|
||||
}
|
||||
}
|
||||
|
@ -454,8 +454,8 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
|
|||
index = ip_set_nfnl_get_byindex(par->net,
|
||||
info->del_set.index);
|
||||
if (index == IPSET_INVALID_ID) {
|
||||
pr_warn("Cannot find del_set index %u as target\n",
|
||||
info->del_set.index);
|
||||
pr_info_ratelimited("Cannot find del_set index %u as target\n",
|
||||
info->del_set.index);
|
||||
if (info->add_set.index != IPSET_INVALID_ID)
|
||||
ip_set_nfnl_put(par->net,
|
||||
info->add_set.index);
|
||||
|
@ -465,7 +465,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
|
|||
|
||||
if (info->map_set.index != IPSET_INVALID_ID) {
|
||||
if (strncmp(par->table, "mangle", 7)) {
|
||||
pr_warn("--map-set only usable from mangle table\n");
|
||||
pr_info_ratelimited("--map-set only usable from mangle table\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
if (((info->flags & IPSET_FLAG_MAP_SKBPRIO) |
|
||||
|
@ -473,14 +473,14 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
|
|||
!(par->hook_mask & (1 << NF_INET_FORWARD |
|
||||
1 << NF_INET_LOCAL_OUT |
|
||||
1 << NF_INET_POST_ROUTING))) {
|
||||
pr_warn("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n");
|
||||
pr_info_ratelimited("mapping of prio or/and queue is allowed only from OUTPUT/FORWARD/POSTROUTING chains\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
index = ip_set_nfnl_get_byindex(par->net,
|
||||
info->map_set.index);
|
||||
if (index == IPSET_INVALID_ID) {
|
||||
pr_warn("Cannot find map_set index %u as target\n",
|
||||
info->map_set.index);
|
||||
pr_info_ratelimited("Cannot find map_set index %u as target\n",
|
||||
info->map_set.index);
|
||||
if (info->add_set.index != IPSET_INVALID_ID)
|
||||
ip_set_nfnl_put(par->net,
|
||||
info->add_set.index);
|
||||
|
@ -494,7 +494,7 @@ set_target_v3_checkentry(const struct xt_tgchk_param *par)
|
|||
if (info->add_set.dim > IPSET_DIM_MAX ||
|
||||
info->del_set.dim > IPSET_DIM_MAX ||
|
||||
info->map_set.dim > IPSET_DIM_MAX) {
|
||||
pr_warn("Protocol error: SET target dimension is over the limit!\n");
|
||||
pr_info_ratelimited("SET target dimension over the limit!\n");
|
||||
if (info->add_set.index != IPSET_INVALID_ID)
|
||||
ip_set_nfnl_put(par->net, info->add_set.index);
|
||||
if (info->del_set.index != IPSET_INVALID_ID)
|
||||
|
|
|
@ -171,7 +171,8 @@ static int socket_mt_v1_check(const struct xt_mtchk_param *par)
|
|||
return err;
|
||||
|
||||
if (info->flags & ~XT_SOCKET_FLAGS_V1) {
|
||||
pr_info("unknown flags 0x%x\n", info->flags & ~XT_SOCKET_FLAGS_V1);
|
||||
pr_info_ratelimited("unknown flags 0x%x\n",
|
||||
info->flags & ~XT_SOCKET_FLAGS_V1);
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
|
@ -187,7 +188,8 @@ static int socket_mt_v2_check(const struct xt_mtchk_param *par)
|
|||
return err;
|
||||
|
||||
if (info->flags & ~XT_SOCKET_FLAGS_V2) {
|
||||
pr_info("unknown flags 0x%x\n", info->flags & ~XT_SOCKET_FLAGS_V2);
|
||||
pr_info_ratelimited("unknown flags 0x%x\n",
|
||||
info->flags & ~XT_SOCKET_FLAGS_V2);
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
|
@ -203,8 +205,8 @@ static int socket_mt_v3_check(const struct xt_mtchk_param *par)
|
|||
if (err)
|
||||
return err;
|
||||
if (info->flags & ~XT_SOCKET_FLAGS_V3) {
|
||||
pr_info("unknown flags 0x%x\n",
|
||||
info->flags & ~XT_SOCKET_FLAGS_V3);
|
||||
pr_info_ratelimited("unknown flags 0x%x\n",
|
||||
info->flags & ~XT_SOCKET_FLAGS_V3);
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
|
|
|
@ -44,8 +44,8 @@ static int state_mt_check(const struct xt_mtchk_param *par)
|
|||
|
||||
ret = nf_ct_netns_get(par->net, par->family);
|
||||
if (ret < 0)
|
||||
pr_info("cannot load conntrack support for proto=%u\n",
|
||||
par->family);
|
||||
pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
|
||||
par->family);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
|
|
@ -235,13 +235,13 @@ static int time_mt_check(const struct xt_mtchk_param *par)
|
|||
|
||||
if (info->daytime_start > XT_TIME_MAX_DAYTIME ||
|
||||
info->daytime_stop > XT_TIME_MAX_DAYTIME) {
|
||||
pr_info("invalid argument - start or "
|
||||
"stop time greater than 23:59:59\n");
|
||||
pr_info_ratelimited("invalid argument - start or stop time greater than 23:59:59\n");
|
||||
return -EDOM;
|
||||
}
|
||||
|
||||
if (info->flags & ~XT_TIME_ALL_FLAGS) {
|
||||
pr_info("unknown flags 0x%x\n", info->flags & ~XT_TIME_ALL_FLAGS);
|
||||
pr_info_ratelimited("unknown flags 0x%x\n",
|
||||
info->flags & ~XT_TIME_ALL_FLAGS);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
|
|
@ -2308,7 +2308,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
|
|||
if (cb->start) {
|
||||
ret = cb->start(cb);
|
||||
if (ret)
|
||||
goto error_unlock;
|
||||
goto error_put;
|
||||
}
|
||||
|
||||
nlk->cb_running = true;
|
||||
|
@ -2328,6 +2328,8 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
|
|||
*/
|
||||
return -EINTR;
|
||||
|
||||
error_put:
|
||||
module_put(control->module);
|
||||
error_unlock:
|
||||
sock_put(sk);
|
||||
mutex_unlock(nlk->cb_mutex);
|
||||
|
|
|
@ -445,7 +445,7 @@ send_fragmentable:
|
|||
(char *)&opt, sizeof(opt));
|
||||
if (ret == 0) {
|
||||
ret = kernel_sendmsg(conn->params.local->socket, &msg,
|
||||
iov, 1, iov[0].iov_len);
|
||||
iov, 2, len);
|
||||
|
||||
opt = IPV6_PMTUDISC_DO;
|
||||
kernel_setsockopt(conn->params.local->socket,
|
||||
|
|
|
@ -1397,13 +1397,18 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
|
|||
nla_get_u32(tca[TCA_CHAIN]) != chain->index)
|
||||
continue;
|
||||
if (!tcf_chain_dump(chain, q, parent, skb, cb,
|
||||
index_start, &index))
|
||||
index_start, &index)) {
|
||||
err = -EMSGSIZE;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
cb->args[0] = index;
|
||||
|
||||
out:
|
||||
/* If we did no progress, the error (EMSGSIZE) is real */
|
||||
if (skb->len == 0 && err)
|
||||
return err;
|
||||
return skb->len;
|
||||
}
|
||||
|
||||
|
|
|
@ -170,9 +170,28 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
|
|||
enum nl80211_bss_scan_width scan_width;
|
||||
struct ieee80211_supported_band *sband =
|
||||
rdev->wiphy.bands[setup->chandef.chan->band];
|
||||
scan_width = cfg80211_chandef_to_scan_width(&setup->chandef);
|
||||
setup->basic_rates = ieee80211_mandatory_rates(sband,
|
||||
scan_width);
|
||||
|
||||
if (setup->chandef.chan->band == NL80211_BAND_2GHZ) {
|
||||
int i;
|
||||
|
||||
/*
|
||||
* Older versions selected the mandatory rates for
|
||||
* 2.4 GHz as well, but were broken in that only
|
||||
* 1 Mbps was regarded as a mandatory rate. Keep
|
||||
* using just 1 Mbps as the default basic rate for
|
||||
* mesh to be interoperable with older versions.
|
||||
*/
|
||||
for (i = 0; i < sband->n_bitrates; i++) {
|
||||
if (sband->bitrates[i].bitrate == 10) {
|
||||
setup->basic_rates = BIT(i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
scan_width = cfg80211_chandef_to_scan_width(&setup->chandef);
|
||||
setup->basic_rates = ieee80211_mandatory_rates(sband,
|
||||
scan_width);
|
||||
}
|
||||
}
|
||||
|
||||
err = cfg80211_chandef_dfs_required(&rdev->wiphy,
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue