Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix up several Kconfig dependencies in netfilter, from Martin Willi
    and Florian Westphal.

 2) Memory leak in be2net driver, from Petr Oros.

 3) Memory leak in E-Switch handling of mlx5 driver, from Raed Salem.

 4) mlx5_attach_interface needs to check for errors, from Huy Nguyen.

 5) tipc_release() needs to orphan the sock, from Cong Wang.

 6) Need to program TxConfig register after TX/RX is enabled in r8169
    driver, not beforehand, from Maciej S. Szmigiero.

 7) Handle 64K PAGE_SIZE properly in ena driver, from Netanel Belgazal.

 8) Fix crash regression in ip_do_fragment(), from Taehee Yoo.

 9) syzbot can create conditions where the kernel log is flooded with
    synflood warnings due to the creation of many listening sockets;
    fix that. From Willem de Bruijn.

10) Fix RCU issues in rds socket layer, from Cong Wang.

11) Fix vlan matching in nfp driver, from Pieter Jansen van Vuuren.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (59 commits)
  nfp: flower: reject tunnel encap with ipv6 outer headers for offloading
  nfp: flower: fix vlan match by checking both vlan id and vlan pcp
  tipc: check return value of __tipc_dump_start()
  s390/qeth: don't dump past end of unknown HW header
  s390/qeth: use vzalloc for QUERY OAT buffer
  s390/qeth: switch on SG by default for IQD devices
  s390/qeth: indicate error when netdev allocation fails
  rds: fix two RCU related problems
  r8169: Clear RTL_FLAG_TASK_*_PENDING when clearing RTL_FLAG_TASK_ENABLED
  erspan: fix error handling for erspan tunnel
  erspan: return PACKET_REJECT when the appropriate tunnel is not found
  tcp: rate limit synflood warnings further
  MIPS: lantiq: dma: add dev pointer
  netfilter: xt_hashlimit: use s->file instead of s->private
  netfilter: nfnetlink_queue: Solve the NFQUEUE/conntrack clash for NF_REPEAT
  netfilter: cttimeout: ctnl_timeout_find_get() returns incorrect pointer to type
  netfilter: conntrack: timeout interface depend on CONFIG_NF_CONNTRACK_TIMEOUT
  netfilter: conntrack: reset tcp maxwin on re-register
  qmi_wwan: Support dynamic config on Quectel EP06
  ethernet: renesas: convert to SPDX identifiers
  ...
Linus Torvalds 2018-09-12 17:32:50 -10:00
commit 67b076095d
68 changed files with 594 additions and 401 deletions


@ -40,6 +40,7 @@ struct ltq_dma_channel {
int desc; /* the current descriptor */
struct ltq_dma_desc *desc_base; /* the descriptor base */
int phys; /* physical addr */
struct device *dev;
};
enum {


@ -130,7 +130,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
unsigned long flags;
ch->desc = 0;
ch->desc_base = dma_zalloc_coherent(NULL,
ch->desc_base = dma_zalloc_coherent(ch->dev,
LTQ_DESC_NUM * LTQ_DESC_SIZE,
&ch->phys, GFP_ATOMIC);
@ -182,7 +182,7 @@ ltq_dma_free(struct ltq_dma_channel *ch)
if (!ch->desc_base)
return;
ltq_dma_close(ch);
dma_free_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE,
dma_free_coherent(ch->dev, LTQ_DESC_NUM * LTQ_DESC_SIZE,
ch->desc_base, ch->phys);
}
EXPORT_SYMBOL_GPL(ltq_dma_free);


@ -459,12 +459,12 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
cqe = &admin_queue->cq.entries[head_masked];
/* Go over all the completions */
while ((cqe->acq_common_descriptor.flags &
while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
/* Do not read the rest of the completion entry before the
* phase bit was validated
*/
rmb();
dma_rmb();
ena_com_handle_single_admin_completion(admin_queue, cqe);
head_masked++;
@ -627,17 +627,10 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
mmio_read_reg |= mmio_read->seq_num &
ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
/* make sure read_resp->req_id get updated before the hw can write
* there
*/
wmb();
writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
writel_relaxed(mmio_read_reg,
ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
mmiowb();
for (i = 0; i < timeout; i++) {
if (read_resp->req_id == mmio_read->seq_num)
if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
break;
udelay(1);
@ -1796,8 +1789,13 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
aenq_common = &aenq_e->aenq_common_desc;
/* Go over all the events */
while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
phase) {
while ((READ_ONCE(aenq_common->flags) &
ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
/* Make sure the phase bit (ownership) is as expected before
* reading the rest of the descriptor.
*/
dma_rmb();
pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
aenq_common->group, aenq_common->syndrom,
(u64)aenq_common->timestamp_low +


@ -51,6 +51,11 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
if (desc_phase != expected_phase)
return NULL;
/* Make sure we read the rest of the descriptor after the phase bit
* has been read
*/
dma_rmb();
return cdesc;
}
@ -493,6 +498,7 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
if (cdesc_phase != expected_phase)
return -EAGAIN;
dma_rmb();
if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
pr_err("Invalid req id %d\n", cdesc->req_id);
return -EINVAL;


@ -107,8 +107,7 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
return io_sq->q_depth - 1 - cnt;
}
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
bool relaxed)
static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
{
u16 tail;
@ -117,10 +116,7 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
io_sq->qid, tail);
if (relaxed)
writel_relaxed(tail, io_sq->db_addr);
else
writel(tail, io_sq->db_addr);
writel(tail, io_sq->db_addr);
return 0;
}


@ -76,7 +76,7 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
static int ena_rss_init_default(struct ena_adapter *adapter);
static void check_for_admin_com_state(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter);
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
static int ena_restore_device(struct ena_adapter *adapter);
static void ena_tx_timeout(struct net_device *dev)
@ -461,7 +461,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
return -ENOMEM;
}
dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
u64_stats_update_begin(&rx_ring->syncp);
@ -478,7 +478,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
rx_info->page_offset = 0;
ena_buf = &rx_info->ena_buf;
ena_buf->paddr = dma;
ena_buf->len = PAGE_SIZE;
ena_buf->len = ENA_PAGE_SIZE;
return 0;
}
@ -495,7 +495,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
return;
}
dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
DMA_FROM_DEVICE);
__free_page(page);
@ -551,14 +551,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
rx_ring->qid, i, num);
}
if (likely(i)) {
/* Add memory barrier to make sure the desc were written before
* issue a doorbell
*/
wmb();
ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);
mmiowb();
}
/* ena_com_write_sq_doorbell issues a wmb() */
if (likely(i))
ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
rx_ring->next_to_use = next_to_use;
@ -916,10 +911,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
do {
dma_unmap_page(rx_ring->dev,
dma_unmap_addr(&rx_info->ena_buf, paddr),
PAGE_SIZE, DMA_FROM_DEVICE);
ENA_PAGE_SIZE, DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
rx_info->page_offset, len, PAGE_SIZE);
rx_info->page_offset, len, ENA_PAGE_SIZE);
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"rx skb updated. len %d. data_len %d\n",
@ -1900,7 +1895,7 @@ static int ena_close(struct net_device *netdev)
"Destroy failure, restarting device\n");
ena_dump_stats_to_dmesg(adapter);
/* rtnl lock already obtained in dev_ioctl() layer */
ena_destroy_device(adapter);
ena_destroy_device(adapter, false);
ena_restore_device(adapter);
}
@ -2112,12 +2107,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
tx_ring->ring_size);
/* This WMB is aimed to:
* 1 - perform smp barrier before reading next_to_completion
* 2 - make sure the desc were written before trigger DB
*/
wmb();
/* stop the queue when no more space available, the packet can have up
* to sgl_size + 2. one for the meta descriptor and one for header
* (if the header is larger than tx_max_header_size).
@ -2136,10 +2125,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
* stop the queue but meanwhile clean_tx_irq updates
* next_to_completion and terminates.
* The queue will remain stopped forever.
* To solve this issue this function perform rmb, check
* the wakeup condition and wake up the queue if needed.
* To solve this issue add a mb() to make sure that
* netif_tx_stop_queue() write is vissible before checking if
* there is additional space in the queue.
*/
smp_rmb();
smp_mb();
if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
> ENA_TX_WAKEUP_THRESH) {
@ -2151,8 +2141,10 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
if (netif_xmit_stopped(txq) || !skb->xmit_more) {
/* trigger the dma engine */
ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false);
/* trigger the dma engine. ena_com_write_sq_doorbell()
* has a mb
*/
ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
u64_stats_update_begin(&tx_ring->syncp);
tx_ring->tx_stats.doorbells++;
u64_stats_update_end(&tx_ring->syncp);
@ -2550,12 +2542,15 @@ err_disable_msix:
return rc;
}
static void ena_destroy_device(struct ena_adapter *adapter)
static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
{
struct net_device *netdev = adapter->netdev;
struct ena_com_dev *ena_dev = adapter->ena_dev;
bool dev_up;
if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
return;
netif_carrier_off(netdev);
del_timer_sync(&adapter->timer_service);
@ -2563,7 +2558,8 @@ static void ena_destroy_device(struct ena_adapter *adapter)
dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
adapter->dev_up_before_reset = dev_up;
ena_com_set_admin_running_state(ena_dev, false);
if (!graceful)
ena_com_set_admin_running_state(ena_dev, false);
if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
ena_down(adapter);
@ -2591,6 +2587,7 @@ static void ena_destroy_device(struct ena_adapter *adapter)
adapter->reset_reason = ENA_REGS_RESET_NORMAL;
clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
}
static int ena_restore_device(struct ena_adapter *adapter)
@ -2635,6 +2632,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
}
}
set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
dev_err(&pdev->dev, "Device reset completed successfully\n");
@ -2665,7 +2663,7 @@ static void ena_fw_reset_device(struct work_struct *work)
return;
}
rtnl_lock();
ena_destroy_device(adapter);
ena_destroy_device(adapter, false);
ena_restore_device(adapter);
rtnl_unlock();
}
@ -3409,30 +3407,24 @@ static void ena_remove(struct pci_dev *pdev)
netdev->rx_cpu_rmap = NULL;
}
#endif /* CONFIG_RFS_ACCEL */
unregister_netdev(netdev);
del_timer_sync(&adapter->timer_service);
cancel_work_sync(&adapter->reset_task);
/* Reset the device only if the device is running. */
unregister_netdev(netdev);
/* If the device is running then we want to make sure the device will be
* reset to make sure no more events will be issued by the device.
*/
if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
ena_com_dev_reset(ena_dev, adapter->reset_reason);
set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
ena_free_mgmnt_irq(adapter);
ena_disable_msix(adapter);
rtnl_lock();
ena_destroy_device(adapter, true);
rtnl_unlock();
free_netdev(netdev);
ena_com_mmio_reg_read_request_destroy(ena_dev);
ena_com_abort_admin_commands(ena_dev);
ena_com_wait_for_abort_completion(ena_dev);
ena_com_admin_destroy(ena_dev);
ena_com_rss_destroy(ena_dev);
ena_com_delete_debug_area(ena_dev);
@ -3467,7 +3459,7 @@ static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
"ignoring device reset request as the device is being suspended\n");
clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}
ena_destroy_device(adapter);
ena_destroy_device(adapter, true);
rtnl_unlock();
return 0;
}
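
The smp_rmb() -> smp_mb() change in ena_start_xmit() above is the usual stop-queue/re-check idiom: a full barrier publishes the queue stop before the free-space counter is re-read, so a completion path running concurrently cannot miss the stop and leave the queue asleep forever. A minimal sketch of the idiom, using hypothetical free_descs()/WAKEUP_THRESH helpers rather than the ena driver's actual code:

	if (unlikely(free_descs(ring) < WAKEUP_THRESH)) {
		netif_tx_stop_queue(txq);
		/* Pairs with the barrier in the completion path: make the stop
		 * visible before re-reading free space, otherwise a concurrent
		 * clean_tx_irq() could miss it and never wake the queue.
		 */
		smp_mb();
		if (free_descs(ring) > WAKEUP_THRESH)
			netif_tx_wake_queue(txq);
	}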


@ -355,4 +355,15 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
int ena_get_sset_count(struct net_device *netdev, int sset);
/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the
* driver passes 0.
* Since the max packet size the ENA handles is ~9kB limit the buffer length to
* 16kB.
*/
#if PAGE_SIZE > SZ_16K
#define ENA_PAGE_SIZE SZ_16K
#else
#define ENA_PAGE_SIZE PAGE_SIZE
#endif
#endif /* !(ENA_H) */


@ -4500,7 +4500,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
port_res->max_vfs += le16_to_cpu(pcie->num_vfs);
}
}
return status;
goto err;
}
pcie = be_get_pcie_desc(resp->func_param, desc_count,


@ -274,6 +274,7 @@ ltq_etop_hw_init(struct net_device *dev)
struct ltq_etop_chan *ch = &priv->ch[i];
ch->idx = ch->dma.nr = i;
ch->dma.dev = &priv->pdev->dev;
if (IS_TX(i)) {
ltq_dma_alloc_tx(&ch->dma);


@ -132,11 +132,11 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
delayed_event_start(priv);
dev_ctx->context = intf->add(dev);
set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
if (intf->attach)
set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
if (dev_ctx->context) {
set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
if (intf->attach)
set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
spin_lock_irq(&priv->ctx_lock);
list_add_tail(&dev_ctx->list, &priv->ctx_list);
@ -211,12 +211,17 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
if (intf->attach) {
if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
goto out;
intf->attach(dev, dev_ctx->context);
if (intf->attach(dev, dev_ctx->context))
goto out;
set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
} else {
if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
goto out;
dev_ctx->context = intf->add(dev);
if (!dev_ctx->context)
goto out;
set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
}
@ -391,16 +396,17 @@ void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
}
}
static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
{
return (u16)((dev->pdev->bus->number << 8) |
return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
(dev->pdev->bus->number << 8) |
PCI_SLOT(dev->pdev->devfn));
}
/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
u16 pci_id = mlx5_gen_pci_id(dev);
u32 pci_id = mlx5_gen_pci_id(dev);
struct mlx5_core_dev *res = NULL;
struct mlx5_core_dev *tmp_dev;
struct mlx5_priv *priv;
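
The mlx5_gen_pci_id() change above widens the id because the old 16-bit value ignored the PCI domain, so functions on the same bus/slot but in different domains collapsed to a single id. A stand-alone illustration with made-up domain/bus/slot numbers (plain user-space C, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical devices: PCI domains 0 and 1, both on bus 0x03, slot 0. */
		uint16_t old_id_dom0 = (0x03 << 8) | 0;              /* 0x0300            */
		uint16_t old_id_dom1 = (0x03 << 8) | 0;              /* 0x0300, collision */
		uint32_t new_id_dom1 = (1u << 16) | (0x03 << 8) | 0; /* 0x10300, distinct */

		printf("old: %#x vs %#x, new: %#x\n", old_id_dom0, old_id_dom1, new_id_dom1);
		return 0;
	}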


@ -191,7 +191,7 @@ set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
{
if (psrc_m) {
MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);
MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_v));
MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
}
if (pdst_m) {


@ -663,6 +663,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
if (err)
goto miss_rule_err;
kvfree(flow_group_in);
return 0;
miss_rule_err:


@ -1578,6 +1578,33 @@ static u64 matched_fgs_get_version(struct list_head *match_head)
return version;
}
static struct fs_fte *
lookup_fte_locked(struct mlx5_flow_group *g,
u32 *match_value,
bool take_write)
{
struct fs_fte *fte_tmp;
if (take_write)
nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
else
nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
rhash_fte);
if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
fte_tmp = NULL;
goto out;
}
nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
out:
if (take_write)
up_write_ref_node(&g->node);
else
up_read_ref_node(&g->node);
return fte_tmp;
}
static struct mlx5_flow_handle *
try_add_to_existing_fg(struct mlx5_flow_table *ft,
struct list_head *match_head,
@ -1600,10 +1627,6 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
if (IS_ERR(fte))
return ERR_PTR(-ENOMEM);
list_for_each_entry(iter, match_head, list) {
nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
}
search_again_locked:
version = matched_fgs_get_version(match_head);
/* Try to find a fg that already contains a matching fte */
@ -1611,20 +1634,9 @@ search_again_locked:
struct fs_fte *fte_tmp;
g = iter->g;
fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
rhash_fte);
if (!fte_tmp || !tree_get_node(&fte_tmp->node))
fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
if (!fte_tmp)
continue;
nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
if (!take_write) {
list_for_each_entry(iter, match_head, list)
up_read_ref_node(&iter->g->node);
} else {
list_for_each_entry(iter, match_head, list)
up_write_ref_node(&iter->g->node);
}
rule = add_rule_fg(g, spec->match_value,
flow_act, dest, dest_num, fte_tmp);
up_write_ref_node(&fte_tmp->node);
@ -1633,19 +1645,6 @@ search_again_locked:
return rule;
}
/* No group with matching fte found. Try to add a new fte to any
* matching fg.
*/
if (!take_write) {
list_for_each_entry(iter, match_head, list)
up_read_ref_node(&iter->g->node);
list_for_each_entry(iter, match_head, list)
nested_down_write_ref_node(&iter->g->node,
FS_LOCK_PARENT);
take_write = true;
}
/* Check the ft version, for case that new flow group
* was added while the fgs weren't locked
*/
@ -1657,27 +1656,30 @@ search_again_locked:
/* Check the fgs version, for case the new FTE with the
* same values was added while the fgs weren't locked
*/
if (version != matched_fgs_get_version(match_head))
if (version != matched_fgs_get_version(match_head)) {
take_write = true;
goto search_again_locked;
}
list_for_each_entry(iter, match_head, list) {
g = iter->g;
if (!g->node.active)
continue;
nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
err = insert_fte(g, fte);
if (err) {
up_write_ref_node(&g->node);
if (err == -ENOSPC)
continue;
list_for_each_entry(iter, match_head, list)
up_write_ref_node(&iter->g->node);
kmem_cache_free(steering->ftes_cache, fte);
return ERR_PTR(err);
}
nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
list_for_each_entry(iter, match_head, list)
up_write_ref_node(&iter->g->node);
up_write_ref_node(&g->node);
rule = add_rule_fg(g, spec->match_value,
flow_act, dest, dest_num, fte);
up_write_ref_node(&fte->node);
@ -1686,8 +1688,6 @@ search_again_locked:
}
rule = ERR_PTR(-ENOENT);
out:
list_for_each_entry(iter, match_head, list)
up_write_ref_node(&iter->g->node);
kmem_cache_free(steering->ftes_cache, fte);
return rule;
}
@ -1726,6 +1726,8 @@ search_again_locked:
if (err) {
if (take_write)
up_write_ref_node(&ft->node);
else
up_read_ref_node(&ft->node);
return ERR_PTR(err);
}


@ -331,9 +331,17 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
add_timer(&health->timer);
}
void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
{
struct mlx5_core_health *health = &dev->priv.health;
unsigned long flags;
if (disable_health) {
spin_lock_irqsave(&health->wq_lock, flags);
set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
spin_unlock_irqrestore(&health->wq_lock, flags);
}
del_timer_sync(&health->timer);
}


@ -878,8 +878,10 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
priv->numa_node = dev_to_node(&dev->pdev->dev);
priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
if (!priv->dbg_root)
if (!priv->dbg_root) {
dev_err(&pdev->dev, "Cannot create debugfs dir, aborting\n");
return -ENOMEM;
}
err = mlx5_pci_enable_device(dev);
if (err) {
@ -928,7 +930,7 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
pci_clear_master(dev->pdev);
release_bar(dev->pdev);
mlx5_pci_disable_device(dev);
debugfs_remove(priv->dbg_root);
debugfs_remove_recursive(priv->dbg_root);
}
static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
@ -1286,7 +1288,7 @@ err_cleanup_once:
mlx5_cleanup_once(dev);
err_stop_poll:
mlx5_stop_health_poll(dev);
mlx5_stop_health_poll(dev, boot);
if (mlx5_cmd_teardown_hca(dev)) {
dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
goto out_err;
@ -1346,7 +1348,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
mlx5_free_irq_vectors(dev);
if (cleanup)
mlx5_cleanup_once(dev);
mlx5_stop_health_poll(dev);
mlx5_stop_health_poll(dev, cleanup);
err = mlx5_cmd_teardown_hca(dev);
if (err) {
dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
@ -1608,7 +1610,7 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
* with the HCA, so the health polll is no longer needed.
*/
mlx5_drain_health_wq(dev);
mlx5_stop_health_poll(dev);
mlx5_stop_health_poll(dev, false);
ret = mlx5_cmd_force_teardown_hca(dev);
if (ret) {


@ -39,9 +39,9 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
return (u32)wq->fbc.sz_m1 + 1;
}
u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
{
return (u32)wq->fbc.frag_sz_m1 + 1;
return wq->fbc.frag_sz_m1 + 1;
}
u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
@ -138,7 +138,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
u32 sq_strides_offset;
u16 sq_strides_offset;
u32 rq_pg_remainder;
int err;


@ -80,7 +80,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_cyc *wq,
struct mlx5_wq_ctrl *wq_ctrl);
u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,


@ -337,14 +337,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
MLXSW_SP_SB_CM(1500, 9, 0),
MLXSW_SP_SB_CM(1500, 9, 0),
MLXSW_SP_SB_CM(1500, 9, 0),
MLXSW_SP_SB_CM(0, 0, 0),
MLXSW_SP_SB_CM(0, 0, 0),
MLXSW_SP_SB_CM(0, 0, 0),
MLXSW_SP_SB_CM(0, 0, 0),
MLXSW_SP_SB_CM(0, 0, 0),
MLXSW_SP_SB_CM(0, 0, 0),
MLXSW_SP_SB_CM(0, 0, 0),
MLXSW_SP_SB_CM(0, 0, 0),
MLXSW_SP_SB_CM(0, 140000, 15),
MLXSW_SP_SB_CM(0, 140000, 15),
MLXSW_SP_SB_CM(0, 140000, 15),
MLXSW_SP_SB_CM(0, 140000, 15),
MLXSW_SP_SB_CM(0, 140000, 15),
MLXSW_SP_SB_CM(0, 140000, 15),
MLXSW_SP_SB_CM(0, 140000, 15),
MLXSW_SP_SB_CM(0, 140000, 15),
MLXSW_SP_SB_CM(1, 0xff, 0),
};


@ -52,6 +52,7 @@
#define NFP_FL_TUNNEL_CSUM cpu_to_be16(0x01)
#define NFP_FL_TUNNEL_KEY cpu_to_be16(0x04)
#define NFP_FL_TUNNEL_GENEVE_OPT cpu_to_be16(0x0800)
#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS IP_TUNNEL_INFO_TX
#define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS (NFP_FL_TUNNEL_CSUM | \
NFP_FL_TUNNEL_KEY | \
NFP_FL_TUNNEL_GENEVE_OPT)
@ -741,11 +742,16 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
nfp_fl_push_vlan(psh_v, a);
*a_len += sizeof(struct nfp_fl_push_vlan);
} else if (is_tcf_tunnel_set(a)) {
struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
struct nfp_repr *repr = netdev_priv(netdev);
*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
if (*tun_type == NFP_FL_TUNNEL_NONE)
return -EOPNOTSUPP;
if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
return -EOPNOTSUPP;
/* Pre-tunnel action is required for tunnel encap.
* This checks for next hop entries on NFP.
* If none, the packet falls back before applying other actions.


@ -70,6 +70,7 @@ struct nfp_app;
#define NFP_FL_FEATS_GENEVE BIT(0)
#define NFP_FL_NBI_MTU_SETTING BIT(1)
#define NFP_FL_FEATS_GENEVE_OPT BIT(2)
#define NFP_FL_FEATS_VLAN_PCP BIT(3)
#define NFP_FL_FEATS_LAG BIT(31)
struct nfp_fl_mask_id {


@ -56,7 +56,7 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
FLOW_DISSECTOR_KEY_VLAN,
target);
/* Populate the tci field. */
if (flow_vlan->vlan_id) {
if (flow_vlan->vlan_id || flow_vlan->vlan_priority) {
tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
flow_vlan->vlan_priority) |
FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,


@ -192,6 +192,17 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
key_size += sizeof(struct nfp_flower_mac_mpls);
}
if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
struct flow_dissector_key_vlan *flow_vlan;
flow_vlan = skb_flow_dissector_target(flow->dissector,
FLOW_DISSECTOR_KEY_VLAN,
flow->mask);
if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
flow_vlan->vlan_priority)
return -EOPNOTSUPP;
}
if (dissector_uses_key(flow->dissector,
FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;


@ -45,34 +45,33 @@ qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result)
{
__be16 rx_data;
__be16 tx_data;
struct spi_transfer *transfer;
struct spi_message *msg;
struct spi_transfer transfer[2];
struct spi_message msg;
int ret;
memset(transfer, 0, sizeof(transfer));
spi_message_init(&msg);
tx_data = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_INTERNAL | reg);
*result = 0;
transfer[0].tx_buf = &tx_data;
transfer[0].len = QCASPI_CMD_LEN;
transfer[1].rx_buf = &rx_data;
transfer[1].len = QCASPI_CMD_LEN;
spi_message_add_tail(&transfer[0], &msg);
if (qca->legacy_mode) {
msg = &qca->spi_msg1;
transfer = &qca->spi_xfer1;
transfer->tx_buf = &tx_data;
transfer->rx_buf = NULL;
transfer->len = QCASPI_CMD_LEN;
spi_sync(qca->spi_dev, msg);
} else {
msg = &qca->spi_msg2;
transfer = &qca->spi_xfer2[0];
transfer->tx_buf = &tx_data;
transfer->rx_buf = NULL;
transfer->len = QCASPI_CMD_LEN;
transfer = &qca->spi_xfer2[1];
spi_sync(qca->spi_dev, &msg);
spi_message_init(&msg);
}
transfer->tx_buf = NULL;
transfer->rx_buf = &rx_data;
transfer->len = QCASPI_CMD_LEN;
ret = spi_sync(qca->spi_dev, msg);
spi_message_add_tail(&transfer[1], &msg);
ret = spi_sync(qca->spi_dev, &msg);
if (!ret)
ret = msg->status;
ret = msg.status;
if (ret)
qcaspi_spi_error(qca);
@ -86,35 +85,32 @@ int
qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value)
{
__be16 tx_data[2];
struct spi_transfer *transfer;
struct spi_message *msg;
struct spi_transfer transfer[2];
struct spi_message msg;
int ret;
memset(&transfer, 0, sizeof(transfer));
spi_message_init(&msg);
tx_data[0] = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_INTERNAL | reg);
tx_data[1] = cpu_to_be16(value);
transfer[0].tx_buf = &tx_data[0];
transfer[0].len = QCASPI_CMD_LEN;
transfer[1].tx_buf = &tx_data[1];
transfer[1].len = QCASPI_CMD_LEN;
spi_message_add_tail(&transfer[0], &msg);
if (qca->legacy_mode) {
msg = &qca->spi_msg1;
transfer = &qca->spi_xfer1;
transfer->tx_buf = &tx_data[0];
transfer->rx_buf = NULL;
transfer->len = QCASPI_CMD_LEN;
spi_sync(qca->spi_dev, msg);
} else {
msg = &qca->spi_msg2;
transfer = &qca->spi_xfer2[0];
transfer->tx_buf = &tx_data[0];
transfer->rx_buf = NULL;
transfer->len = QCASPI_CMD_LEN;
transfer = &qca->spi_xfer2[1];
spi_sync(qca->spi_dev, &msg);
spi_message_init(&msg);
}
transfer->tx_buf = &tx_data[1];
transfer->rx_buf = NULL;
transfer->len = QCASPI_CMD_LEN;
ret = spi_sync(qca->spi_dev, msg);
spi_message_add_tail(&transfer[1], &msg);
ret = spi_sync(qca->spi_dev, &msg);
if (!ret)
ret = msg->status;
ret = msg.status;
if (ret)
qcaspi_spi_error(qca);


@ -99,22 +99,24 @@ static u32
qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
{
__be16 cmd;
struct spi_message *msg = &qca->spi_msg2;
struct spi_transfer *transfer = &qca->spi_xfer2[0];
struct spi_message msg;
struct spi_transfer transfer[2];
int ret;
memset(&transfer, 0, sizeof(transfer));
spi_message_init(&msg);
cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL);
transfer->tx_buf = &cmd;
transfer->rx_buf = NULL;
transfer->len = QCASPI_CMD_LEN;
transfer = &qca->spi_xfer2[1];
transfer->tx_buf = src;
transfer->rx_buf = NULL;
transfer->len = len;
transfer[0].tx_buf = &cmd;
transfer[0].len = QCASPI_CMD_LEN;
transfer[1].tx_buf = src;
transfer[1].len = len;
ret = spi_sync(qca->spi_dev, msg);
spi_message_add_tail(&transfer[0], &msg);
spi_message_add_tail(&transfer[1], &msg);
ret = spi_sync(qca->spi_dev, &msg);
if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) {
if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
qcaspi_spi_error(qca);
return 0;
}
@ -125,17 +127,20 @@ qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
static u32
qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
{
struct spi_message *msg = &qca->spi_msg1;
struct spi_transfer *transfer = &qca->spi_xfer1;
struct spi_message msg;
struct spi_transfer transfer;
int ret;
transfer->tx_buf = src;
transfer->rx_buf = NULL;
transfer->len = len;
memset(&transfer, 0, sizeof(transfer));
spi_message_init(&msg);
ret = spi_sync(qca->spi_dev, msg);
transfer.tx_buf = src;
transfer.len = len;
if (ret || (msg->actual_length != len)) {
spi_message_add_tail(&transfer, &msg);
ret = spi_sync(qca->spi_dev, &msg);
if (ret || (msg.actual_length != len)) {
qcaspi_spi_error(qca);
return 0;
}
@ -146,23 +151,25 @@ qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
static u32
qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
{
struct spi_message *msg = &qca->spi_msg2;
struct spi_message msg;
__be16 cmd;
struct spi_transfer *transfer = &qca->spi_xfer2[0];
struct spi_transfer transfer[2];
int ret;
memset(&transfer, 0, sizeof(transfer));
spi_message_init(&msg);
cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL);
transfer->tx_buf = &cmd;
transfer->rx_buf = NULL;
transfer->len = QCASPI_CMD_LEN;
transfer = &qca->spi_xfer2[1];
transfer->tx_buf = NULL;
transfer->rx_buf = dst;
transfer->len = len;
transfer[0].tx_buf = &cmd;
transfer[0].len = QCASPI_CMD_LEN;
transfer[1].rx_buf = dst;
transfer[1].len = len;
ret = spi_sync(qca->spi_dev, msg);
spi_message_add_tail(&transfer[0], &msg);
spi_message_add_tail(&transfer[1], &msg);
ret = spi_sync(qca->spi_dev, &msg);
if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) {
if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
qcaspi_spi_error(qca);
return 0;
}
@ -173,17 +180,20 @@ qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
static u32
qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len)
{
struct spi_message *msg = &qca->spi_msg1;
struct spi_transfer *transfer = &qca->spi_xfer1;
struct spi_message msg;
struct spi_transfer transfer;
int ret;
transfer->tx_buf = NULL;
transfer->rx_buf = dst;
transfer->len = len;
memset(&transfer, 0, sizeof(transfer));
spi_message_init(&msg);
ret = spi_sync(qca->spi_dev, msg);
transfer.rx_buf = dst;
transfer.len = len;
if (ret || (msg->actual_length != len)) {
spi_message_add_tail(&transfer, &msg);
ret = spi_sync(qca->spi_dev, &msg);
if (ret || (msg.actual_length != len)) {
qcaspi_spi_error(qca);
return 0;
}
@ -195,19 +205,23 @@ static int
qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd)
{
__be16 tx_data;
struct spi_message *msg = &qca->spi_msg1;
struct spi_transfer *transfer = &qca->spi_xfer1;
struct spi_message msg;
struct spi_transfer transfer;
int ret;
tx_data = cpu_to_be16(cmd);
transfer->len = sizeof(tx_data);
transfer->tx_buf = &tx_data;
transfer->rx_buf = NULL;
memset(&transfer, 0, sizeof(transfer));
ret = spi_sync(qca->spi_dev, msg);
spi_message_init(&msg);
tx_data = cpu_to_be16(cmd);
transfer.len = sizeof(cmd);
transfer.tx_buf = &tx_data;
spi_message_add_tail(&transfer, &msg);
ret = spi_sync(qca->spi_dev, &msg);
if (!ret)
ret = msg->status;
ret = msg.status;
if (ret)
qcaspi_spi_error(qca);
@ -835,16 +849,6 @@ qcaspi_netdev_setup(struct net_device *dev)
qca = netdev_priv(dev);
memset(qca, 0, sizeof(struct qcaspi));
memset(&qca->spi_xfer1, 0, sizeof(struct spi_transfer));
memset(&qca->spi_xfer2, 0, sizeof(struct spi_transfer) * 2);
spi_message_init(&qca->spi_msg1);
spi_message_add_tail(&qca->spi_xfer1, &qca->spi_msg1);
spi_message_init(&qca->spi_msg2);
spi_message_add_tail(&qca->spi_xfer2[0], &qca->spi_msg2);
spi_message_add_tail(&qca->spi_xfer2[1], &qca->spi_msg2);
memset(&qca->txr, 0, sizeof(qca->txr));
qca->txr.count = TX_RING_MAX_LEN;
}


@ -83,11 +83,6 @@ struct qcaspi {
struct tx_ring txr;
struct qcaspi_stats stats;
struct spi_message spi_msg1;
struct spi_message spi_msg2;
struct spi_transfer spi_xfer1;
struct spi_transfer spi_xfer2[2];
u8 *rx_buffer;
u32 buffer_size;
u8 sync;


@ -631,7 +631,7 @@ struct rtl8169_tc_offsets {
};
enum rtl_flag {
RTL_FLAG_TASK_ENABLED,
RTL_FLAG_TASK_ENABLED = 0,
RTL_FLAG_TASK_SLOW_PENDING,
RTL_FLAG_TASK_RESET_PENDING,
RTL_FLAG_MAX
@ -4634,13 +4634,13 @@ static void rtl_hw_start(struct rtl8169_private *tp)
rtl_set_rx_max_size(tp);
rtl_set_rx_tx_desc_registers(tp);
rtl_set_tx_config_registers(tp);
RTL_W8(tp, Cfg9346, Cfg9346_Lock);
/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
RTL_R8(tp, IntrMask);
RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_init_rxcfg(tp);
rtl_set_tx_config_registers(tp);
rtl_set_rx_mode(tp->dev);
/* no early-rx interrupts */
@ -6655,7 +6655,8 @@ static int rtl8169_close(struct net_device *dev)
rtl8169_update_counters(tp);
rtl_lock_work(tp);
clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
/* Clear all task flags */
bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
rtl8169_down(dev);
rtl_unlock_work(tp);
@ -6838,7 +6839,9 @@ static void rtl8169_net_suspend(struct net_device *dev)
rtl_lock_work(tp);
napi_disable(&tp->napi);
clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
/* Clear all task flags */
bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
rtl_unlock_work(tp);
rtl_pll_power_down(tp);


@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
#
# Renesas device configuration
#


@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Renesas device drivers.
#


@ -1,13 +1,9 @@
// SPDX-License-Identifier: GPL-2.0+
/* PTP 1588 clock using the Renesas Ethernet AVB
*
* Copyright (C) 2013-2015 Renesas Electronics Corporation
* Copyright (C) 2015 Renesas Solutions Corp.
* Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include "ravb.h"


@ -967,6 +967,13 @@ static const struct usb_device_id products[] = {
USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
.driver_info = (unsigned long)&qmi_wwan_info,
},
{ /* Quectel EP06/EG06/EM06 */
USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306,
USB_CLASS_VENDOR_SPEC,
USB_SUBCLASS_VENDOR_SPEC,
0xff),
.driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
},
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
@ -1255,7 +1262,6 @@ static const struct usb_device_id products[] = {
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
@ -1331,6 +1337,19 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
return false;
}
static bool quectel_ep06_diag_detected(struct usb_interface *intf)
{
struct usb_device *dev = interface_to_usbdev(intf);
struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c &&
le16_to_cpu(dev->descriptor.idProduct) == 0x0306 &&
intf_desc.bNumEndpoints == 2)
return true;
return false;
}
static int qmi_wwan_probe(struct usb_interface *intf,
const struct usb_device_id *prod)
{
@ -1365,6 +1384,15 @@ static int qmi_wwan_probe(struct usb_interface *intf,
return -ENODEV;
}
/* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so
* we need to match on class/subclass/protocol. These values are
* identical for the diagnostic- and QMI-interface, but bNumEndpoints is
* different. Ignore the current interface if the number of endpoints
* equals the number for the diag interface (two).
*/
if (quectel_ep06_diag_detected(intf))
return -ENODEV;
return usbnet_probe(intf, id);
}


@ -87,8 +87,7 @@ struct netfront_cb {
/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
static DECLARE_WAIT_QUEUE_HEAD(module_load_q);
static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
static DECLARE_WAIT_QUEUE_HEAD(module_wq);
struct netfront_stats {
u64 packets;
@ -1332,11 +1331,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
netif_carrier_off(netdev);
xenbus_switch_state(dev, XenbusStateInitialising);
wait_event(module_load_q,
xenbus_read_driver_state(dev->otherend) !=
XenbusStateClosed &&
xenbus_read_driver_state(dev->otherend) !=
XenbusStateUnknown);
wait_event(module_wq,
xenbus_read_driver_state(dev->otherend) !=
XenbusStateClosed &&
xenbus_read_driver_state(dev->otherend) !=
XenbusStateUnknown);
return netdev;
exit:
@ -2010,15 +2009,14 @@ static void netback_changed(struct xenbus_device *dev,
dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));
wake_up_all(&module_wq);
switch (backend_state) {
case XenbusStateInitialising:
case XenbusStateInitialised:
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
break;
case XenbusStateUnknown:
wake_up_all(&module_unload_q);
break;
case XenbusStateInitWait:
@ -2034,12 +2032,10 @@ static void netback_changed(struct xenbus_device *dev,
break;
case XenbusStateClosed:
wake_up_all(&module_unload_q);
if (dev->state == XenbusStateClosed)
break;
/* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
wake_up_all(&module_unload_q);
xenbus_frontend_closed(dev);
break;
}
@ -2147,14 +2143,14 @@ static int xennet_remove(struct xenbus_device *dev)
if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
xenbus_switch_state(dev, XenbusStateClosing);
wait_event(module_unload_q,
wait_event(module_wq,
xenbus_read_driver_state(dev->otherend) ==
XenbusStateClosing ||
xenbus_read_driver_state(dev->otherend) ==
XenbusStateUnknown);
xenbus_switch_state(dev, XenbusStateClosed);
wait_event(module_unload_q,
wait_event(module_wq,
xenbus_read_driver_state(dev->otherend) ==
XenbusStateClosed ||
xenbus_read_driver_state(dev->otherend) ==


@ -25,6 +25,7 @@
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
@ -4699,7 +4700,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
priv.buffer_len = oat_data.buffer_len;
priv.response_len = 0;
priv.buffer = kzalloc(oat_data.buffer_len, GFP_KERNEL);
priv.buffer = vzalloc(oat_data.buffer_len);
if (!priv.buffer) {
rc = -ENOMEM;
goto out;
@ -4740,7 +4741,7 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
rc = -EFAULT;
out_free:
kfree(priv.buffer);
vfree(priv.buffer);
out:
return rc;
}
@ -5706,6 +5707,8 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->hw_features |= NETIF_F_SG;
dev->vlan_features |= NETIF_F_SG;
if (IS_IQD(card))
dev->features |= NETIF_F_SG;
}
return dev;
@ -5768,8 +5771,10 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
qeth_update_from_chp_desc(card);
card->dev = qeth_alloc_netdev(card);
if (!card->dev)
if (!card->dev) {
rc = -ENOMEM;
goto err_card;
}
qeth_determine_capabilities(card);
enforced_disc = qeth_enforce_discipline(card);


@ -423,7 +423,7 @@ static int qeth_l2_process_inbound_buffer(struct qeth_card *card,
default:
dev_kfree_skb_any(skb);
QETH_CARD_TEXT(card, 3, "inbunkno");
QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
continue;
}
work_done++;


@ -1390,7 +1390,7 @@ static int qeth_l3_process_inbound_buffer(struct qeth_card *card,
default:
dev_kfree_skb_any(skb);
QETH_CARD_TEXT(card, 3, "inbunkno");
QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
continue;
}
work_done++;


@ -362,8 +362,8 @@ struct mlx5_frag_buf {
struct mlx5_frag_buf_ctrl {
struct mlx5_frag_buf frag_buf;
u32 sz_m1;
u32 frag_sz_m1;
u32 strides_offset;
u16 frag_sz_m1;
u16 strides_offset;
u8 log_sz;
u8 log_stride;
u8 log_frag_strides;
@ -995,7 +995,7 @@ static inline u32 mlx5_base_mkey(const u32 key)
}
static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
u32 strides_offset,
u16 strides_offset,
struct mlx5_frag_buf_ctrl *fbc)
{
fbc->log_stride = log_stride;
@ -1052,7 +1052,7 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);


@ -30,7 +30,7 @@ struct nf_conn_timeout {
};
static inline unsigned int *
nf_ct_timeout_data(struct nf_conn_timeout *t)
nf_ct_timeout_data(const struct nf_conn_timeout *t)
{
struct nf_ct_timeout *timeout;


@ -939,9 +939,6 @@ struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
WARN_ON_ONCE(!in_task());
if (!sock_flag(sk, SOCK_ZEROCOPY))
return NULL;
skb = sock_omalloc(sk, 0, GFP_KERNEL);
if (!skb)
return NULL;


@ -599,6 +599,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
nextp = &fp->next;
fp->prev = NULL;
memset(&fp->rbnode, 0, sizeof(fp->rbnode));
fp->sk = NULL;
head->data_len += fp->len;
head->len += fp->len;
if (head->ip_summed != fp->ip_summed)


@ -178,6 +178,9 @@ static void ipgre_err(struct sk_buff *skb, u32 info,
if (tpi->proto == htons(ETH_P_TEB))
itn = net_generic(net, gre_tap_net_id);
else if (tpi->proto == htons(ETH_P_ERSPAN) ||
tpi->proto == htons(ETH_P_ERSPAN2))
itn = net_generic(net, erspan_net_id);
else
itn = net_generic(net, ipgre_net_id);
@ -328,6 +331,8 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
return PACKET_RCVD;
}
return PACKET_REJECT;
drop:
kfree_skb(skb);
return PACKET_RCVD;


@ -106,6 +106,10 @@ config NF_NAT_IPV4
if NF_NAT_IPV4
config NF_NAT_MASQUERADE_IPV4
bool
if NF_TABLES
config NFT_CHAIN_NAT_IPV4
depends on NF_TABLES_IPV4
tristate "IPv4 nf_tables nat chain support"
@ -115,9 +119,6 @@ config NFT_CHAIN_NAT_IPV4
packet transformations such as the source, destination address and
source and destination ports.
config NF_NAT_MASQUERADE_IPV4
bool
config NFT_MASQ_IPV4
tristate "IPv4 masquerading support for nf_tables"
depends on NF_TABLES_IPV4
@ -135,6 +136,7 @@ config NFT_REDIR_IPV4
help
This is the expression that provides IPv4 redirect support for
nf_tables.
endif # NF_TABLES
config NF_NAT_SNMP_BASIC
tristate "Basic SNMP-ALG support"


@ -1185,7 +1185,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
flags = msg->msg_flags;
if (flags & MSG_ZEROCOPY && size) {
if (flags & MSG_ZEROCOPY && size && sock_flag(sk, SOCK_ZEROCOPY)) {
if (sk->sk_state != TCP_ESTABLISHED) {
err = -EINVAL;
goto out_err;


@ -6367,8 +6367,8 @@ static bool tcp_syn_flood_action(const struct sock *sk,
if (!queue->synflood_warned &&
net->ipv4.sysctl_tcp_syncookies != 2 &&
xchg(&queue->synflood_warned, 1) == 0)
pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
proto, ntohs(tcp_hdr(skb)->dest), msg);
net_info_ratelimited("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
proto, ntohs(tcp_hdr(skb)->dest), msg);
return want_cookie;
}


@ -445,6 +445,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *prev, struct net_devic
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
head->truesize += fp->truesize;
fp->sk = NULL;
}
sub_frag_mem_limit(fq->q.net, head->truesize);


@ -351,20 +351,28 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
memcpy(&phs_hdr->iucv_hdr, imsg, sizeof(struct iucv_message));
skb->dev = iucv->hs_dev;
if (!skb->dev)
return -ENODEV;
if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev))
return -ENETDOWN;
if (!skb->dev) {
err = -ENODEV;
goto err_free;
}
if (!(skb->dev->flags & IFF_UP) || !netif_carrier_ok(skb->dev)) {
err = -ENETDOWN;
goto err_free;
}
if (skb->len > skb->dev->mtu) {
if (sock->sk_type == SOCK_SEQPACKET)
return -EMSGSIZE;
else
skb_trim(skb, skb->dev->mtu);
if (sock->sk_type == SOCK_SEQPACKET) {
err = -EMSGSIZE;
goto err_free;
}
skb_trim(skb, skb->dev->mtu);
}
skb->protocol = cpu_to_be16(ETH_P_AF_IUCV);
nskb = skb_clone(skb, GFP_ATOMIC);
if (!nskb)
return -ENOMEM;
if (!nskb) {
err = -ENOMEM;
goto err_free;
}
skb_queue_tail(&iucv->send_skb_q, nskb);
err = dev_queue_xmit(skb);
if (net_xmit_eval(err)) {
@ -375,6 +383,10 @@ static int afiucv_hs_send(struct iucv_message *imsg, struct sock *sock,
WARN_ON(atomic_read(&iucv->msg_recv) < 0);
}
return net_xmit_eval(err);
err_free:
kfree_skb(skb);
return err;
}
static struct sock *__iucv_get_sock_by_name(char *nm)
@ -1167,7 +1179,7 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
err = afiucv_hs_send(&txmsg, sk, skb, 0);
if (err) {
atomic_dec(&iucv->msg_sent);
goto fail;
goto out;
}
} else { /* Classic VM IUCV transport */
skb_queue_tail(&iucv->send_skb_q, skb);
@ -2155,8 +2167,8 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
struct sock *sk;
struct iucv_sock *iucv;
struct af_iucv_trans_hdr *trans_hdr;
int err = NET_RX_SUCCESS;
char nullstring[8];
int err = 0;
if (skb->len < (ETH_HLEN + sizeof(struct af_iucv_trans_hdr))) {
WARN_ONCE(1, "AF_IUCV too short skb, len=%d, min=%d",
@ -2254,7 +2266,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,
err = afiucv_hs_callback_rx(sk, skb);
break;
default:
;
kfree_skb(skb);
}
return err;


@ -1874,7 +1874,7 @@ static void iucv_pm_complete(struct device *dev)
* Returns 0 if there are still iucv pathes defined
* 1 if there are no iucv pathes defined
*/
int iucv_path_table_empty(void)
static int iucv_path_table_empty(void)
{
int i;


@ -771,13 +771,13 @@ config NETFILTER_XT_TARGET_CHECKSUM
depends on NETFILTER_ADVANCED
---help---
This option adds a `CHECKSUM' target, which can be used in the iptables mangle
table.
table to work around buggy DHCP clients in virtualized environments.
You can use this target to compute and fill in the checksum in
a packet that lacks a checksum. This is particularly useful,
if you need to work around old applications such as dhcp clients,
that do not work well with checksum offloads, but don't want to disable
checksum offload in your device.
Some old DHCP clients drop packets because they are not aware
that the checksum would normally be offloaded to hardware and
thus should be considered valid.
This target can be used to fill in the checksum using iptables
when such packets are sent via a virtual network device.
To compile it as a module, choose M here. If unsure, say N.


@ -776,9 +776,26 @@ static const struct nf_hook_ops ipv6_conntrack_ops[] = {
};
#endif
static int nf_ct_tcp_fixup(struct nf_conn *ct, void *_nfproto)
{
u8 nfproto = (unsigned long)_nfproto;
if (nf_ct_l3num(ct) != nfproto)
return 0;
if (nf_ct_protonum(ct) == IPPROTO_TCP &&
ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED) {
ct->proto.tcp.seen[0].td_maxwin = 0;
ct->proto.tcp.seen[1].td_maxwin = 0;
}
return 0;
}
static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
{
struct nf_conntrack_net *cnet = net_generic(net, nf_conntrack_net_id);
bool fixup_needed = false;
int err = 0;
mutex_lock(&nf_ct_proto_mutex);
@ -798,6 +815,8 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
ARRAY_SIZE(ipv4_conntrack_ops));
if (err)
cnet->users4 = 0;
else
fixup_needed = true;
break;
#if IS_ENABLED(CONFIG_IPV6)
case NFPROTO_IPV6:
@ -814,6 +833,8 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
ARRAY_SIZE(ipv6_conntrack_ops));
if (err)
cnet->users6 = 0;
else
fixup_needed = true;
break;
#endif
default:
@ -822,6 +843,11 @@ static int nf_ct_netns_do_get(struct net *net, u8 nfproto)
}
out_unlock:
mutex_unlock(&nf_ct_proto_mutex);
if (fixup_needed)
nf_ct_iterate_cleanup_net(net, nf_ct_tcp_fixup,
(void *)(unsigned long)nfproto, 0, 0);
return err;
}


@ -675,7 +675,7 @@ static int nlattr_to_dccp(struct nlattr *cda[], struct nf_conn *ct)
}
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
@ -697,6 +697,8 @@ static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[],
timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
}
}
timeouts[CTA_TIMEOUT_DCCP_UNSPEC] = timeouts[CTA_TIMEOUT_DCCP_REQUEST];
return 0;
}
@ -726,7 +728,7 @@ dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = {
[CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 },
[CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
/* template, data assigned later */
@ -827,6 +829,11 @@ static int dccp_init_net(struct net *net, u_int16_t proto)
dn->dccp_timeout[CT_DCCP_CLOSEREQ] = 64 * HZ;
dn->dccp_timeout[CT_DCCP_CLOSING] = 64 * HZ;
dn->dccp_timeout[CT_DCCP_TIMEWAIT] = 2 * DCCP_MSL;
/* timeouts[0] is unused, make it same as SYN_SENT so
* ->timeouts[0] contains 'new' timeout, like udp or icmp.
*/
dn->dccp_timeout[CT_DCCP_NONE] = dn->dccp_timeout[CT_DCCP_REQUEST];
}
return dccp_kmemdup_sysctl_table(net, pn, dn);
@ -856,7 +863,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = {
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = dccp_timeout_nlattr_to_obj,
.obj_to_nlattr = dccp_timeout_obj_to_nlattr,
@ -864,7 +871,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4 = {
.obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
.nla_policy = dccp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = dccp_init_net,
.get_net_proto = dccp_get_net_proto,
};
@ -889,7 +896,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = {
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = dccp_timeout_nlattr_to_obj,
.obj_to_nlattr = dccp_timeout_obj_to_nlattr,
@ -897,7 +904,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6 = {
.obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
.nla_policy = dccp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = dccp_init_net,
.get_net_proto = dccp_get_net_proto,
};


@ -70,7 +70,7 @@ static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
return ret;
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
@ -113,7 +113,7 @@ static const struct nla_policy
generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = {
[CTA_TIMEOUT_GENERIC_TIMEOUT] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static struct ctl_table generic_sysctl_table[] = {
@ -164,7 +164,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
.pkt_to_tuple = generic_pkt_to_tuple,
.packet = generic_packet,
.new = generic_new,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = generic_timeout_nlattr_to_obj,
.obj_to_nlattr = generic_timeout_obj_to_nlattr,
@ -172,7 +172,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_generic =
.obj_size = sizeof(unsigned int),
.nla_policy = generic_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = generic_init_net,
.get_net_proto = generic_get_net_proto,
};


@ -285,7 +285,7 @@ static void gre_destroy(struct nf_conn *ct)
nf_ct_gre_keymap_destroy(master);
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
@ -334,7 +334,7 @@ gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
[CTA_TIMEOUT_GRE_UNREPLIED] = { .type = NLA_U32 },
[CTA_TIMEOUT_GRE_REPLIED] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
static int gre_init_net(struct net *net, u_int16_t proto)
{
@ -367,7 +367,7 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = gre_timeout_nlattr_to_obj,
.obj_to_nlattr = gre_timeout_obj_to_nlattr,
@ -375,7 +375,7 @@ static const struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 = {
.obj_size = sizeof(unsigned int) * GRE_CT_MAX,
.nla_policy = gre_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.net_id = &proto_gre_net_id,
.init_net = gre_init_net,
};


@ -273,7 +273,7 @@ static unsigned int icmp_nlattr_tuple_size(void)
}
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
@ -313,7 +313,7 @@ static const struct nla_policy
icmp_timeout_nla_policy[CTA_TIMEOUT_ICMP_MAX+1] = {
[CTA_TIMEOUT_ICMP_TIMEOUT] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static struct ctl_table icmp_sysctl_table[] = {
@ -374,7 +374,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
.nlattr_to_tuple = icmp_nlattr_to_tuple,
.nla_policy = icmp_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = icmp_timeout_nlattr_to_obj,
.obj_to_nlattr = icmp_timeout_obj_to_nlattr,
@ -382,7 +382,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp =
.obj_size = sizeof(unsigned int),
.nla_policy = icmp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = icmp_init_net,
.get_net_proto = icmp_get_net_proto,
};


@ -274,7 +274,7 @@ static unsigned int icmpv6_nlattr_tuple_size(void)
}
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
@ -314,7 +314,7 @@ static const struct nla_policy
icmpv6_timeout_nla_policy[CTA_TIMEOUT_ICMPV6_MAX+1] = {
[CTA_TIMEOUT_ICMPV6_TIMEOUT] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static struct ctl_table icmpv6_sysctl_table[] = {
@ -373,7 +373,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
.nlattr_to_tuple = icmpv6_nlattr_to_tuple,
.nla_policy = icmpv6_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = icmpv6_timeout_nlattr_to_obj,
.obj_to_nlattr = icmpv6_timeout_obj_to_nlattr,
@ -381,7 +381,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6 =
.obj_size = sizeof(unsigned int),
.nla_policy = icmpv6_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = icmpv6_init_net,
.get_net_proto = icmpv6_get_net_proto,
};

View file

@ -591,7 +591,7 @@ static int nlattr_to_sctp(struct nlattr *cda[], struct nf_conn *ct)
}
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
@ -613,6 +613,8 @@ static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[],
timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
}
}
timeouts[CTA_TIMEOUT_SCTP_UNSPEC] = timeouts[CTA_TIMEOUT_SCTP_CLOSED];
return 0;
}
@ -644,7 +646,7 @@ sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
[CTA_TIMEOUT_SCTP_HEARTBEAT_SENT] = { .type = NLA_U32 },
[CTA_TIMEOUT_SCTP_HEARTBEAT_ACKED] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
@ -743,6 +745,11 @@ static int sctp_init_net(struct net *net, u_int16_t proto)
for (i = 0; i < SCTP_CONNTRACK_MAX; i++)
sn->timeouts[i] = sctp_timeouts[i];
/* timeouts[0] is unused, init it so ->timeouts[0] contains
* 'new' timeout, like udp or icmp.
*/
sn->timeouts[0] = sctp_timeouts[SCTP_CONNTRACK_CLOSED];
}
return sctp_kmemdup_sysctl_table(pn, sn);
@ -773,7 +780,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = {
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = sctp_timeout_nlattr_to_obj,
.obj_to_nlattr = sctp_timeout_obj_to_nlattr,
@ -781,7 +788,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 = {
.obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
.nla_policy = sctp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = sctp_init_net,
.get_net_proto = sctp_get_net_proto,
};
@ -806,7 +813,8 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#endif
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = sctp_timeout_nlattr_to_obj,
.obj_to_nlattr = sctp_timeout_obj_to_nlattr,
@ -814,8 +822,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 = {
.obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
.nla_policy = sctp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = sctp_init_net,
.get_net_proto = sctp_get_net_proto,
};

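The added assignments above (and the matching comment) seed slot 0 of the per-state timeout table instead of leaving it zero; the TCP hunks further down make the same change with SYN_SENT as the source state. A standalone sketch of the pattern, with invented state names and values:

#include <stdio.h>

enum { CT_UNSPEC, CT_NEW, CT_ESTABLISHED, CT_MAX };

int main(void)
{
	unsigned int timeouts[CT_MAX] = {
		[CT_NEW]         = 120,
		[CT_ESTABLISHED] = 432000,
	};

	/* slot 0 is never written by the per-state setup above; seed it
	 * with the "new connection" value so a lookup of state 0 gets
	 * something sensible instead of 0 */
	timeouts[CT_UNSPEC] = timeouts[CT_NEW];

	printf("timeout used for 'new' lookups: %u\n", timeouts[CT_UNSPEC]);
	return 0;
}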
View file

@ -1279,7 +1279,7 @@ static unsigned int tcp_nlattr_tuple_size(void)
}
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
@ -1301,6 +1301,7 @@ static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
timeouts[TCP_CONNTRACK_SYN_SENT] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
}
if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
timeouts[TCP_CONNTRACK_SYN_RECV] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
@ -1341,6 +1342,8 @@ static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[],
timeouts[TCP_CONNTRACK_UNACK] =
ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
}
timeouts[CTA_TIMEOUT_TCP_UNSPEC] = timeouts[CTA_TIMEOUT_TCP_SYN_SENT];
return 0;
}
@ -1391,7 +1394,7 @@ static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
[CTA_TIMEOUT_TCP_RETRANS] = { .type = NLA_U32 },
[CTA_TIMEOUT_TCP_UNACK] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static struct ctl_table tcp_sysctl_table[] = {
@ -1518,6 +1521,10 @@ static int tcp_init_net(struct net *net, u_int16_t proto)
for (i = 0; i < TCP_CONNTRACK_TIMEOUT_MAX; i++)
tn->timeouts[i] = tcp_timeouts[i];
/* timeouts[0] is unused, make it same as SYN_SENT so
* ->timeouts[0] contains 'new' timeout, like udp or icmp.
*/
tn->timeouts[0] = tcp_timeouts[TCP_CONNTRACK_SYN_SENT];
tn->tcp_loose = nf_ct_tcp_loose;
tn->tcp_be_liberal = nf_ct_tcp_be_liberal;
tn->tcp_max_retrans = nf_ct_tcp_max_retrans;
@ -1551,7 +1558,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
.nlattr_size = TCP_NLATTR_SIZE,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = tcp_timeout_nlattr_to_obj,
.obj_to_nlattr = tcp_timeout_obj_to_nlattr,
@ -1560,7 +1567,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 =
TCP_CONNTRACK_TIMEOUT_MAX,
.nla_policy = tcp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = tcp_init_net,
.get_net_proto = tcp_get_net_proto,
};
@ -1586,7 +1593,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 =
.nlattr_tuple_size = tcp_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = tcp_timeout_nlattr_to_obj,
.obj_to_nlattr = tcp_timeout_obj_to_nlattr,
@ -1595,7 +1602,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 =
TCP_CONNTRACK_TIMEOUT_MAX,
.nla_policy = tcp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = tcp_init_net,
.get_net_proto = tcp_get_net_proto,
};

View file

@ -171,7 +171,7 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
return NF_ACCEPT;
}
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>
@ -221,7 +221,7 @@ udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = {
[CTA_TIMEOUT_UDP_UNREPLIED] = { .type = NLA_U32 },
[CTA_TIMEOUT_UDP_REPLIED] = { .type = NLA_U32 },
};
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_SYSCTL
static struct ctl_table udp_sysctl_table[] = {
@ -292,7 +292,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 =
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = udp_timeout_nlattr_to_obj,
.obj_to_nlattr = udp_timeout_obj_to_nlattr,
@ -300,7 +300,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 =
.obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
.nla_policy = udp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = udp_init_net,
.get_net_proto = udp_get_net_proto,
};
@ -321,7 +321,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 =
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = udp_timeout_nlattr_to_obj,
.obj_to_nlattr = udp_timeout_obj_to_nlattr,
@ -329,7 +329,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 =
.obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
.nla_policy = udp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = udp_init_net,
.get_net_proto = udp_get_net_proto,
};
@ -350,7 +350,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = udp_timeout_nlattr_to_obj,
.obj_to_nlattr = udp_timeout_obj_to_nlattr,
@ -358,7 +358,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 =
.obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
.nla_policy = udp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = udp_init_net,
.get_net_proto = udp_get_net_proto,
};
@ -379,7 +379,7 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 =
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
.ctnl_timeout = {
.nlattr_to_obj = udp_timeout_nlattr_to_obj,
.obj_to_nlattr = udp_timeout_obj_to_nlattr,
@ -387,10 +387,9 @@ const struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 =
.obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
.nla_policy = udp_timeout_nla_policy,
},
#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
.init_net = udp_init_net,
.get_net_proto = udp_get_net_proto,
};
EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udplite6);
#endif
#include <net/netfilter/nf_conntrack_timeout.h>

View file

@ -4637,6 +4637,7 @@ static int nft_flush_set(const struct nft_ctx *ctx,
}
set->ndeact++;
nft_set_elem_deactivate(ctx->net, set, elem);
nft_trans_elem_set(trans) = set;
nft_trans_elem(trans) = *elem;
list_add_tail(&trans->list, &ctx->net->nft.commit_list);

View file

@ -489,8 +489,8 @@ err:
return err;
}
static struct ctnl_timeout *
ctnl_timeout_find_get(struct net *net, const char *name)
static struct nf_ct_timeout *ctnl_timeout_find_get(struct net *net,
const char *name)
{
struct ctnl_timeout *timeout, *matching = NULL;
@ -509,7 +509,7 @@ ctnl_timeout_find_get(struct net *net, const char *name)
break;
}
err:
return matching;
return matching ? &matching->timeout : NULL;
}
static void ctnl_timeout_put(struct nf_ct_timeout *t)

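The lookup above now returns the address of the embedded nf_ct_timeout member rather than the containing ctnl_timeout, so callers that only deal in the inner type get a correctly typed pointer. A self-contained sketch of that container/member distinction, with simplified stand-in structs (the real ones carry more fields):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct inner_timeout { unsigned int l4proto; };

struct outer_timeout {
	int refcnt;
	struct inner_timeout timeout;	/* embedded member */
};

/* hand back a pointer to the member the caller knows about, not the container */
static struct inner_timeout *find_get(struct outer_timeout *t)
{
	return t ? &t->timeout : NULL;
}

int main(void)
{
	struct outer_timeout t = { .refcnt = 1, .timeout = { .l4proto = 6 } };
	struct inner_timeout *inner = find_get(&t);

	printf("l4proto via member pointer: %u\n", inner->l4proto);
	printf("refcnt via container_of:    %d\n",
	       container_of(inner, struct outer_timeout, timeout)->refcnt);
	return 0;
}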
View file

@ -233,6 +233,7 @@ static void nfqnl_reinject(struct nf_queue_entry *entry, unsigned int verdict)
int err;
if (verdict == NF_ACCEPT ||
verdict == NF_REPEAT ||
verdict == NF_STOP) {
rcu_read_lock();
ct_hook = rcu_dereference(nf_ct_hook);

View file

@ -799,7 +799,7 @@ err:
}
struct nft_ct_timeout_obj {
struct nf_conn *tmpl;
struct nf_ct_timeout *timeout;
u8 l4proto;
};
@ -809,26 +809,42 @@ static void nft_ct_timeout_obj_eval(struct nft_object *obj,
{
const struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
struct nf_conn *ct = (struct nf_conn *)skb_nfct(pkt->skb);
struct sk_buff *skb = pkt->skb;
struct nf_conn_timeout *timeout;
const unsigned int *values;
if (ct ||
priv->l4proto != pkt->tprot)
if (priv->l4proto != pkt->tprot)
return;
nf_ct_set(skb, priv->tmpl, IP_CT_NEW);
if (!ct || nf_ct_is_template(ct) || nf_ct_is_confirmed(ct))
return;
timeout = nf_ct_timeout_find(ct);
if (!timeout) {
timeout = nf_ct_timeout_ext_add(ct, priv->timeout, GFP_ATOMIC);
if (!timeout) {
regs->verdict.code = NF_DROP;
return;
}
}
rcu_assign_pointer(timeout->timeout, priv->timeout);
/* adjust the timeout as per 'new' state. ct is unconfirmed,
* so the current timestamp must not be added.
*/
values = nf_ct_timeout_data(timeout);
if (values)
nf_ct_refresh(ct, pkt->skb, values[0]);
}
static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
const struct nlattr * const tb[],
struct nft_object *obj)
{
const struct nf_conntrack_zone *zone = &nf_ct_zone_dflt;
struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
const struct nf_conntrack_l4proto *l4proto;
struct nf_conn_timeout *timeout_ext;
struct nf_ct_timeout *timeout;
int l3num = ctx->family;
struct nf_conn *tmpl;
__u8 l4num;
int ret;
@ -863,28 +879,14 @@ static int nft_ct_timeout_obj_init(const struct nft_ctx *ctx,
timeout->l3num = l3num;
timeout->l4proto = l4proto;
tmpl = nf_ct_tmpl_alloc(ctx->net, zone, GFP_ATOMIC);
if (!tmpl) {
ret = -ENOMEM;
goto err_free_timeout;
}
timeout_ext = nf_ct_timeout_ext_add(tmpl, timeout, GFP_ATOMIC);
if (!timeout_ext) {
ret = -ENOMEM;
goto err_free_tmpl;
}
ret = nf_ct_netns_get(ctx->net, ctx->family);
if (ret < 0)
goto err_free_tmpl;
priv->tmpl = tmpl;
goto err_free_timeout;
priv->timeout = timeout;
return 0;
err_free_tmpl:
nf_ct_tmpl_free(tmpl);
err_free_timeout:
kfree(timeout);
err_proto_put:
@ -896,22 +898,19 @@ static void nft_ct_timeout_obj_destroy(const struct nft_ctx *ctx,
struct nft_object *obj)
{
struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl);
struct nf_ct_timeout *timeout;
struct nf_ct_timeout *timeout = priv->timeout;
timeout = rcu_dereference_raw(t->timeout);
nf_ct_untimeout(ctx->net, timeout);
nf_ct_l4proto_put(timeout->l4proto);
nf_ct_netns_put(ctx->net, ctx->family);
nf_ct_tmpl_free(priv->tmpl);
kfree(priv->timeout);
}
static int nft_ct_timeout_obj_dump(struct sk_buff *skb,
struct nft_object *obj, bool reset)
{
const struct nft_ct_timeout_obj *priv = nft_obj_data(obj);
const struct nf_conn_timeout *t = nf_ct_timeout_find(priv->tmpl);
const struct nf_ct_timeout *timeout = rcu_dereference_raw(t->timeout);
const struct nf_ct_timeout *timeout = priv->timeout;
struct nlattr *nest_params;
int ret;

View file

@ -16,6 +16,9 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_CHECKSUM.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael S. Tsirkin <mst@redhat.com>");
MODULE_DESCRIPTION("Xtables: checksum modification");
@ -25,7 +28,7 @@ MODULE_ALIAS("ip6t_CHECKSUM");
static unsigned int
checksum_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
if (skb->ip_summed == CHECKSUM_PARTIAL)
if (skb->ip_summed == CHECKSUM_PARTIAL && !skb_is_gso(skb))
skb_checksum_help(skb);
return XT_CONTINUE;
@ -34,6 +37,8 @@ checksum_tg(struct sk_buff *skb, const struct xt_action_param *par)
static int checksum_tg_check(const struct xt_tgchk_param *par)
{
const struct xt_CHECKSUM_info *einfo = par->targinfo;
const struct ip6t_ip6 *i6 = par->entryinfo;
const struct ipt_ip *i4 = par->entryinfo;
if (einfo->operation & ~XT_CHECKSUM_OP_FILL) {
pr_info_ratelimited("unsupported CHECKSUM operation %x\n",
@ -43,6 +48,21 @@ static int checksum_tg_check(const struct xt_tgchk_param *par)
if (!einfo->operation)
return -EINVAL;
switch (par->family) {
case NFPROTO_IPV4:
if (i4->proto == IPPROTO_UDP &&
(i4->invflags & XT_INV_PROTO) == 0)
return 0;
break;
case NFPROTO_IPV6:
if ((i6->flags & IP6T_F_PROTO) &&
i6->proto == IPPROTO_UDP &&
(i6->invflags & XT_INV_PROTO) == 0)
return 0;
break;
}
pr_warn_once("CHECKSUM should be avoided. If really needed, restrict with \"-p udp\" and only use in OUTPUT\n");
return 0;
}

View file

@ -125,6 +125,7 @@ xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par)
static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
{
struct xt_cluster_match_info *info = par->matchinfo;
int ret;
if (info->total_nodes > XT_CLUSTER_NODES_MAX) {
pr_info_ratelimited("you have exceeded the maximum number of cluster nodes (%u > %u)\n",
@ -135,7 +136,17 @@ static int xt_cluster_mt_checkentry(const struct xt_mtchk_param *par)
pr_info_ratelimited("node mask cannot exceed total number of nodes\n");
return -EDOM;
}
return 0;
ret = nf_ct_netns_get(par->net, par->family);
if (ret < 0)
pr_info_ratelimited("cannot load conntrack support for proto=%u\n",
par->family);
return ret;
}
static void xt_cluster_mt_destroy(const struct xt_mtdtor_param *par)
{
nf_ct_netns_put(par->net, par->family);
}
static struct xt_match xt_cluster_match __read_mostly = {
@ -144,6 +155,7 @@ static struct xt_match xt_cluster_match __read_mostly = {
.match = xt_cluster_mt,
.checkentry = xt_cluster_mt_checkentry,
.matchsize = sizeof(struct xt_cluster_match_info),
.destroy = xt_cluster_mt_destroy,
.me = THIS_MODULE,
};

View file

@ -1057,7 +1057,7 @@ static struct xt_match hashlimit_mt_reg[] __read_mostly = {
static void *dl_seq_start(struct seq_file *s, loff_t *pos)
__acquires(htable->lock)
{
struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
unsigned int *bucket;
spin_lock_bh(&htable->lock);
@ -1074,7 +1074,7 @@ static void *dl_seq_start(struct seq_file *s, loff_t *pos)
static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
unsigned int *bucket = v;
*pos = ++(*bucket);
@ -1088,7 +1088,7 @@ static void *dl_seq_next(struct seq_file *s, void *v, loff_t *pos)
static void dl_seq_stop(struct seq_file *s, void *v)
__releases(htable->lock)
{
struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
unsigned int *bucket = v;
if (!IS_ERR(bucket))
@ -1130,7 +1130,7 @@ static void dl_seq_print(struct dsthash_ent *ent, u_int8_t family,
static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family,
struct seq_file *s)
{
struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private));
struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
spin_lock(&ent->lock);
/* recalculate to show accurate numbers */
@ -1145,7 +1145,7 @@ static int dl_seq_real_show_v2(struct dsthash_ent *ent, u_int8_t family,
static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family,
struct seq_file *s)
{
struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private));
struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
spin_lock(&ent->lock);
/* recalculate to show accurate numbers */
@ -1160,7 +1160,7 @@ static int dl_seq_real_show_v1(struct dsthash_ent *ent, u_int8_t family,
static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
struct seq_file *s)
{
struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->private));
struct xt_hashlimit_htable *ht = PDE_DATA(file_inode(s->file));
spin_lock(&ent->lock);
/* recalculate to show accurate numbers */
@ -1174,7 +1174,7 @@ static int dl_seq_real_show(struct dsthash_ent *ent, u_int8_t family,
static int dl_seq_show_v2(struct seq_file *s, void *v)
{
struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
unsigned int *bucket = (unsigned int *)v;
struct dsthash_ent *ent;
@ -1188,7 +1188,7 @@ static int dl_seq_show_v2(struct seq_file *s, void *v)
static int dl_seq_show_v1(struct seq_file *s, void *v)
{
struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
unsigned int *bucket = v;
struct dsthash_ent *ent;
@ -1202,7 +1202,7 @@ static int dl_seq_show_v1(struct seq_file *s, void *v)
static int dl_seq_show(struct seq_file *s, void *v)
{
struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->private));
struct xt_hashlimit_htable *htable = PDE_DATA(file_inode(s->file));
unsigned int *bucket = v;
struct dsthash_ent *ent;

View file

@ -76,11 +76,13 @@ struct rds_sock *rds_find_bound(const struct in6_addr *addr, __be16 port,
struct rds_sock *rs;
__rds_create_bind_key(key, addr, port, scope_id);
rs = rhashtable_lookup_fast(&bind_hash_table, key, ht_parms);
rcu_read_lock();
rs = rhashtable_lookup(&bind_hash_table, key, ht_parms);
if (rs && !sock_flag(rds_rs_to_sk(rs), SOCK_DEAD))
rds_sock_addref(rs);
else
rs = NULL;
rcu_read_unlock();
rdsdebug("returning rs %p for %pI6c:%u\n", rs, addr,
ntohs(port));
@ -235,6 +237,7 @@ int rds_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
}
sock_set_flag(sk, SOCK_RCU_FREE);
ret = rds_add_bound(rs, binding_addr, &port, scope_id);
if (ret)
goto out;

View file

@ -317,7 +317,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
&metadata->u.tun_info,
opts_len, extack);
if (ret < 0)
goto err_out;
goto release_tun_meta;
}
metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
@ -333,23 +333,24 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
&act_tunnel_key_ops, bind, true);
if (ret) {
NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
goto err_out;
goto release_tun_meta;
}
ret = ACT_P_CREATED;
} else if (!ovr) {
tcf_idr_release(*a, bind);
NL_SET_ERR_MSG(extack, "TC IDR already exists");
return -EEXIST;
ret = -EEXIST;
goto release_tun_meta;
}
t = to_tunnel_key(*a);
params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
if (unlikely(!params_new)) {
tcf_idr_release(*a, bind);
NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
return -ENOMEM;
ret = -ENOMEM;
exists = true;
goto release_tun_meta;
}
params_new->tcft_action = parm->t_action;
params_new->tcft_enc_metadata = metadata;
@ -367,6 +368,9 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
return ret;
release_tun_meta:
dst_release(&metadata->dst);
err_out:
if (exists)
tcf_idr_release(*a, bind);
@ -408,8 +412,10 @@ static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
nla_put_u8(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE,
opt->type) ||
nla_put(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA,
opt->length * 4, opt + 1))
opt->length * 4, opt + 1)) {
nla_nest_cancel(skb, start);
return -EMSGSIZE;
}
len -= sizeof(struct geneve_opt) + opt->length * 4;
src += sizeof(struct geneve_opt) + opt->length * 4;
@ -423,7 +429,7 @@ static int tunnel_key_opts_dump(struct sk_buff *skb,
const struct ip_tunnel_info *info)
{
struct nlattr *start;
int err;
int err = -EINVAL;
if (!info->options_len)
return 0;
@ -435,9 +441,11 @@ static int tunnel_key_opts_dump(struct sk_buff *skb,
if (info->key.tun_flags & TUNNEL_GENEVE_OPT) {
err = tunnel_key_geneve_opts_dump(skb, info);
if (err)
return err;
goto err_out;
} else {
return -EINVAL;
err_out:
nla_nest_cancel(skb, start);
return err;
}
nla_nest_end(skb, start);

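The error-handling rework above funnels failures that happen after the tunnel metadata exists through the release_tun_meta label, so the metadata reference is dropped on those paths as well. A standalone sketch of that single-exit cleanup pattern, with invented names:

#include <stdio.h>
#include <stdlib.h>

struct meta { int refs; };

static void meta_put(struct meta *m)
{
	if (m && --m->refs == 0)
		free(m);
}

static int setup(int fail_step)
{
	struct meta *m = malloc(sizeof(*m));
	int ret = 0;

	if (!m)
		return -1;
	m->refs = 1;

	if (fail_step == 1) { ret = -1; goto release_meta; }
	if (fail_step == 2) { ret = -2; goto release_meta; }

	/* success: the real code hands the object off to the caller;
	 * the demo just drops it so nothing leaks */
	meta_put(m);
	return 0;

release_meta:
	meta_put(m);	/* every failure path drops the reference exactly once */
	return ret;
}

int main(void)
{
	printf("%d %d %d\n", setup(0), setup(1), setup(2));
	return 0;
}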
View file

@ -185,6 +185,10 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
return -ENOMEM;
buf->sk = msg->dst_sk;
if (__tipc_dump_start(&cb, msg->net)) {
kfree_skb(buf);
return -ENOMEM;
}
do {
int rem;
@ -216,6 +220,7 @@ static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
err = 0;
err_out:
tipc_dump_done(&cb);
kfree_skb(buf);
if (err == -EMSGSIZE) {

View file

@ -576,6 +576,7 @@ static int tipc_release(struct socket *sock)
sk_stop_timer(sk, &sk->sk_timer);
tipc_sk_remove(tsk);
sock_orphan(sk);
/* Reject any messages that accumulated in backlog queue */
release_sock(sk);
tipc_dest_list_purge(&tsk->cong_links);
@ -3229,7 +3230,7 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
struct netlink_callback *cb,
struct tipc_sock *tsk))
{
struct rhashtable_iter *iter = (void *)cb->args[0];
struct rhashtable_iter *iter = (void *)cb->args[4];
struct tipc_sock *tsk;
int err;
@ -3265,8 +3266,14 @@ EXPORT_SYMBOL(tipc_nl_sk_walk);
int tipc_dump_start(struct netlink_callback *cb)
{
struct rhashtable_iter *iter = (void *)cb->args[0];
struct net *net = sock_net(cb->skb->sk);
return __tipc_dump_start(cb, sock_net(cb->skb->sk));
}
EXPORT_SYMBOL(tipc_dump_start);
int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
{
/* tipc_nl_name_table_dump() uses cb->args[0...3]. */
struct rhashtable_iter *iter = (void *)cb->args[4];
struct tipc_net *tn = tipc_net(net);
if (!iter) {
@ -3274,17 +3281,16 @@ int tipc_dump_start(struct netlink_callback *cb)
if (!iter)
return -ENOMEM;
cb->args[0] = (long)iter;
cb->args[4] = (long)iter;
}
rhashtable_walk_enter(&tn->sk_rht, iter);
return 0;
}
EXPORT_SYMBOL(tipc_dump_start);
int tipc_dump_done(struct netlink_callback *cb)
{
struct rhashtable_iter *hti = (void *)cb->args[0];
struct rhashtable_iter *hti = (void *)cb->args[4];
rhashtable_walk_exit(hti);
kfree(hti);

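The dump-start rework above moves the socket-walk iterator from cb->args[0] to cb->args[4] because, per the added comment, the name-table dump already uses args[0..3]. A trivial standalone sketch of the non-overlapping slot layout (slot contents and values invented; on Linux an unsigned long can hold a pointer, matching the kernel's cb->args convention):

#include <stdio.h>

int main(void)
{
	unsigned long args[6] = { 0 };
	int iter_state = 42;

	/* one dump helper uses slots 0..3 as its table cursor */
	args[0] = 7; args[1] = 3; args[2] = 0; args[3] = 1;

	/* the socket walk keeps its iterator pointer in a slot the
	 * cursor code never touches */
	args[4] = (unsigned long)&iter_state;

	printf("iterator survives: %d\n", *(int *)args[4]);
	return 0;
}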
View file

@ -69,5 +69,6 @@ int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
struct netlink_callback *cb,
struct tipc_sock *tsk));
int tipc_dump_start(struct netlink_callback *cb);
int __tipc_dump_start(struct netlink_callback *cb, struct net *net);
int tipc_dump_done(struct netlink_callback *cb);
#endif

View file

@ -125,6 +125,9 @@ static int alloc_encrypted_sg(struct sock *sk, int len)
&ctx->sg_encrypted_num_elem,
&ctx->sg_encrypted_size, 0);
if (rc == -ENOSPC)
ctx->sg_encrypted_num_elem = ARRAY_SIZE(ctx->sg_encrypted_data);
return rc;
}
@ -138,6 +141,9 @@ static int alloc_plaintext_sg(struct sock *sk, int len)
&ctx->sg_plaintext_num_elem, &ctx->sg_plaintext_size,
tls_ctx->pending_open_record_frags);
if (rc == -ENOSPC)
ctx->sg_plaintext_num_elem = ARRAY_SIZE(ctx->sg_plaintext_data);
return rc;
}
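
The added lines cap the recorded scatterlist element count at the size of the backing array when the fill helper reports -ENOSPC, so bookkeeping sized off that count cannot run past the array. A trivial standalone sketch of the clamp (buffer size and counts invented; ARRAY_SIZE written out locally to mirror the kernel macro):

#include <errno.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	int sg[16];
	unsigned int num_elem = 21;	/* pretend the fill loop overshot */
	int rc = -ENOSPC;		/* and reported it ran out of room */

	if (rc == -ENOSPC)
		num_elem = ARRAY_SIZE(sg);	/* never index past the array */

	printf("num_elem clamped to %u\n", num_elem);
	return 0;
}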