Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) Fix various build warnings in tlan/qed/xen-netback drivers, from
    Arnd Bergmann.

 2) Propagate proper error code in strparser's strp_recv(), from Geert
    Uytterhoeven.

 3) Fix accidental broadcast of RTM_GETTFILTER responses, from Eric
    Dumazet.

 4) Need to use list_for_each_entry_safe() in qed driver, from Wei
    Yongjun.

 5) Openvswitch 802.1AD bug fixes from Jiri Benc.

 6) Cure BUILD_BUG_ON() in mlx5 driver, from Tom Herbert.

 7) Fix UDP ipv6 checksumming in netvsc driver, from Stephen Hemminger.

 8) stmmac driver fixes from Giuseppe CAVALLARO.

 9) Fix access to mangled IP6CB in tcp, from Eric Dumazet.

10) Fix info leaks in tipc and rtnetlink, from Dan Carpenter.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (27 commits)
  net: bridge: add the multicast_flood flag attribute to brport_attrs
  net: axienet: Remove unused parameter from __axienet_device_reset
  liquidio: CN23XX: fix a loop timeout
  net: rtnl: info leak in rtnl_fill_vfinfo()
  tipc: info leak in __tipc_nl_add_udp_addr()
  net: ipv4: Do not drop to make_route if oif is l3mdev
  net: phy: Trigger state machine on state change and not polling.
  ipv6: tcp: restore IP6CB for pktoptions skbs
  netvsc: Remove mistaken udp.h inclusion.
  xen-netback: fix type mismatch warning
  stmmac: fix error check when init ptp
  stmmac: fix ptp init for gmac4
  qed: fix old-style function definition
  netvsc: fix checksum on UDP IPV6
  net_sched: reorder pernet ops and act ops registrations
  xen-netback: fix guest Rx stall detection (after guest Rx refactor)
  drivers/ptp: Fix kernel memory disclosure
  net/mlx5: Add MLX5_ARRAY_SET64 to fix BUILD_BUG_ON
  qmi_wwan: add support for Quectel EC21 and EC25
  openvswitch: add NETIF_F_HW_VLAN_STAG_TX to internal dev
  ...
commit 29fbff8698 by Linus Torvalds, 2016-10-13 21:40:23 -07:00
29 changed files with 294 additions and 114 deletions


@@ -693,7 +693,7 @@ static int cn23xx_enable_io_queues(struct octeon_device *oct)
         while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
                !(reg_val &
                  CN23XX_PKT_INPUT_CTL_QUIET) &&
-               loop--) {
+               --loop) {
             reg_val = octeon_read_csr64(
                 oct,
                 CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
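
This is the loop-timeout fix from the shortlog: the driver tests the counter after the loop to see whether the reset timed out, and with the post-decrement a timeout exits the loop with the counter at -1, so a zero test never fires; the pre-decrement leaves it at exactly 0. A minimal userspace sketch of the pattern (not the driver code; still_busy() is a made-up stand-in for the CSR poll):

    #include <stdio.h>

    static int still_busy(void) { return 1; }   /* hardware never goes quiet */

    int main(void)
    {
        int loop = 100;

        while (still_busy() && loop--)   /* buggy: post-decrement */
            ;
        printf("after timeout, loop == %d\n", loop);   /* -1: a !loop test misses it */

        loop = 100;
        while (still_busy() && --loop)   /* fixed: pre-decrement */
            ;
        printf("after timeout, loop == %d\n", loop);   /* 0: a !loop test catches it */
        return 0;
    }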


@@ -287,7 +287,7 @@ retry:
             goto retry;
         }
-        MLX5_SET64(manage_pages_in, in, pas[i], addr);
+        MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
     }
 
     MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
@@ -344,7 +344,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
         if (fwp->func_id != func_id)
             continue;
-        MLX5_SET64(manage_pages_out, out, pas[i], fwp->addr);
+        MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
         i++;
     }


@@ -1517,7 +1517,7 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
 static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
 {
     struct qed_ll2_info ll2_info;
-    struct qed_ll2_buffer *buffer;
+    struct qed_ll2_buffer *buffer, *tmp_buffer;
     enum qed_ll2_conn_type conn_type;
     struct qed_ptt *p_ptt;
     int rc, i;
@@ -1587,7 +1587,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
     /* Post all Rx buffers to FW */
     spin_lock_bh(&cdev->ll2->lock);
-    list_for_each_entry(buffer, &cdev->ll2->list, list) {
+    list_for_each_entry_safe(buffer, tmp_buffer, &cdev->ll2->list, list) {
         rc = qed_ll2_post_rx_buffer(QED_LEADING_HWFN(cdev),
                                     cdev->ll2->handle,
                                     buffer->phys_addr, 0, buffer, 1);
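
The switch to the _safe iterator matters because the loop body can unlink and free the buffer it is visiting when posting it to the firmware fails; a plain list_for_each_entry() would then chase a freed next pointer. A minimal userspace sketch of the idiom, with a hand-rolled list rather than the kernel's list.h:

    #include <stdio.h>
    #include <stdlib.h>

    struct buf { int id; struct buf *next; };

    int main(void)
    {
        struct buf *head = NULL, *cur, *tmp;

        for (int i = 0; i < 3; i++) {
            struct buf *b = malloc(sizeof(*b));
            b->id = i;
            b->next = head;
            head = b;
        }

        /* The "safe" shape: cache next before the body runs, so the body
         * may free or unlink cur without derailing the iteration. */
        for (cur = head; cur; cur = tmp) {
            tmp = cur->next;
            printf("dropping buffer %d\n", cur->id);
            free(cur);   /* unsafe with a plain cur = cur->next loop */
        }
        return 0;
    }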


@@ -2947,7 +2947,7 @@ static const struct qed_rdma_ops qed_rdma_ops_pass = {
     .roce_ll2_stats = &qed_roce_ll2_stats,
 };
 
-const struct qed_rdma_ops *qed_get_rdma_ops()
+const struct qed_rdma_ops *qed_get_rdma_ops(void)
 {
     return &qed_rdma_ops_pass;
 }
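
The warning being silenced is C-specific: an empty parameter list in a definition is an old-style (K&R) declaration that says nothing about the arguments, whereas (void) is a real prototype. A two-function illustration (hypothetical names) that gcc -Wold-style-definition flags and accepts respectively:

    static int answer()       { return 42; }   /* old-style definition: warns */
    static int answer_v(void) { return 42; }   /* prototyped: clean */

    int main(void) { return answer() - answer_v(); }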


@@ -650,20 +650,27 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
     if (IS_ERR(priv->clk_ptp_ref)) {
         priv->clk_ptp_rate = clk_get_rate(priv->stmmac_clk);
         priv->clk_ptp_ref = NULL;
+        netdev_dbg(priv->dev, "PTP uses main clock\n");
     } else {
         clk_prepare_enable(priv->clk_ptp_ref);
         priv->clk_ptp_rate = clk_get_rate(priv->clk_ptp_ref);
+        netdev_dbg(priv->dev, "PTP rate %d\n", priv->clk_ptp_rate);
     }
 
     priv->adv_ts = 0;
-    if (priv->dma_cap.atime_stamp && priv->extend_desc)
+    /* Check if adv_ts can be enabled for dwmac 4.x core */
+    if (priv->plat->has_gmac4 && priv->dma_cap.atime_stamp)
+        priv->adv_ts = 1;
+    /* Dwmac 3.x core with extend_desc can support adv_ts */
+    else if (priv->extend_desc && priv->dma_cap.atime_stamp)
         priv->adv_ts = 1;
 
-    if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
-        pr_debug("IEEE 1588-2002 Time Stamp supported\n");
+    if (priv->dma_cap.time_stamp)
+        netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
 
-    if (netif_msg_hw(priv) && priv->adv_ts)
-        pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
+    if (priv->adv_ts)
+        netdev_info(priv->dev,
+                    "IEEE 1588-2008 Advanced Timestamp supported\n");
 
     priv->hw->ptp = &stmmac_ptp;
     priv->hwts_tx_en = 0;
@@ -1702,8 +1709,8 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
     if (init_ptp) {
         ret = stmmac_init_ptp(priv);
-        if (ret && ret != -EOPNOTSUPP)
-            pr_warn("%s: failed PTP initialisation\n", __func__);
+        if (ret)
+            netdev_warn(priv->dev, "PTP support cannot init.\n");
     }
 
 #ifdef CONFIG_DEBUG_FS


@@ -186,10 +186,12 @@ int stmmac_ptp_register(struct stmmac_priv *priv)
                                          priv->device);
     if (IS_ERR(priv->ptp_clock)) {
         priv->ptp_clock = NULL;
-        pr_err("ptp_clock_register() failed on %s\n", priv->dev->name);
-    } else if (priv->ptp_clock)
-        pr_debug("Added PTP HW clock successfully on %s\n",
-                 priv->dev->name);
+        return PTR_ERR(priv->ptp_clock);
+    }
+
+    spin_lock_init(&priv->ptp_lock);
+    netdev_dbg(priv->dev, "Added PTP HW clock successfully\n");
 
     return 0;
 }


@@ -610,8 +610,8 @@ err_out_regions:
 #ifdef CONFIG_PCI
     if (pdev)
         pci_release_regions(pdev);
-#endif
 err_out:
+#endif
     if (pdev)
         pci_disable_device(pdev);
     return rc;


@@ -431,8 +431,7 @@ static void axienet_setoptions(struct net_device *ndev, u32 options)
         lp->options |= options;
 }
 
-static void __axienet_device_reset(struct axienet_local *lp,
-                                   struct device *dev, off_t offset)
+static void __axienet_device_reset(struct axienet_local *lp, off_t offset)
 {
     u32 timeout;
     /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset
@@ -468,8 +467,8 @@ static void axienet_device_reset(struct net_device *ndev)
     u32 axienet_status;
     struct axienet_local *lp = netdev_priv(ndev);
 
-    __axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
-    __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
+    __axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
+    __axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
 
     lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
     lp->options |= XAE_OPTION_VLAN;
@@ -1338,8 +1337,8 @@ static void axienet_dma_err_handler(unsigned long data)
     axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg &
                 ~XAE_MDIO_MC_MDIOEN_MASK));
 
-    __axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
-    __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);
+    __axienet_device_reset(lp, XAXIDMA_TX_CR_OFFSET);
+    __axienet_device_reset(lp, XAXIDMA_RX_CR_OFFSET);
 
     axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
     axienet_mdio_wait_until_ready(lp);


@@ -442,8 +442,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
     }
 
     net_trans_info = get_net_transport_info(skb, &hdr_offset);
-    if (net_trans_info == TRANSPORT_INFO_NOT_IP)
-        goto do_send;
 
     /*
      * Setup the sendside checksum offload only if this is not a
@@ -478,56 +476,29 @@
         }
         lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
         lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
-        goto do_send;
-    }
-
-    if ((skb->ip_summed == CHECKSUM_NONE) ||
-        (skb->ip_summed == CHECKSUM_UNNECESSARY))
-        goto do_send;
-
-    rndis_msg_size += NDIS_CSUM_PPI_SIZE;
-    ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
-                        TCPIP_CHKSUM_PKTINFO);
-
-    csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
-                ppi->ppi_offset);
-
-    if (net_trans_info & (INFO_IPV4 << 16))
-        csum_info->transmit.is_ipv4 = 1;
-    else
-        csum_info->transmit.is_ipv6 = 1;
-
-    if (net_trans_info & INFO_TCP) {
-        csum_info->transmit.tcp_checksum = 1;
-        csum_info->transmit.tcp_header_offset = hdr_offset;
-    } else if (net_trans_info & INFO_UDP) {
-        /* UDP checksum offload is not supported on ws2008r2.
-         * Furthermore, on ws2012 and ws2012r2, there are some
-         * issues with udp checksum offload from Linux guests.
-         * (these are host issues).
-         * For now compute the checksum here.
-         */
-        struct udphdr *uh;
-        u16 udp_len;
-
-        ret = skb_cow_head(skb, 0);
-        if (ret)
-            goto no_memory;
-
-        uh = udp_hdr(skb);
-        udp_len = ntohs(uh->len);
-        uh->check = 0;
-        uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
-                                      ip_hdr(skb)->daddr,
-                                      udp_len, IPPROTO_UDP,
-                                      csum_partial(uh, udp_len, 0));
-        if (uh->check == 0)
-            uh->check = CSUM_MANGLED_0;
-
-        csum_info->transmit.udp_checksum = 0;
+    } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+        if (net_trans_info & INFO_TCP) {
+            rndis_msg_size += NDIS_CSUM_PPI_SIZE;
+            ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
+                                TCPIP_CHKSUM_PKTINFO);
+            csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
+                        ppi->ppi_offset);
+
+            if (net_trans_info & (INFO_IPV4 << 16))
+                csum_info->transmit.is_ipv4 = 1;
+            else
+                csum_info->transmit.is_ipv6 = 1;
+
+            csum_info->transmit.tcp_checksum = 1;
+            csum_info->transmit.tcp_header_offset = hdr_offset;
+        } else {
+            /* UDP checksum (and other) offload is not supported. */
+            if (skb_checksum_help(skb))
+                goto drop;
+        }
     }
 
-do_send:
     /* Start filling in the page buffers with the rndis hdr */
     rndis_msg->msg_len += rndis_msg_size;
     packet->total_data_buflen = rndis_msg->msg_len;
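
The deleted branch is the checksum bug itself: it always built an IPv4 pseudo-header from ip_hdr(skb), even when the packet was IPv6, corrupting UDP checksums on IPv6; the replacement hands anything that is not TCP to skb_checksum_help() instead. For reference, the pseudo-header arithmetic the old code performed, as a standalone userspace sketch (csum_add() and csum_fold() are simplified stand-ins for the kernel's csum_partial()/csum_fold(); addresses and ports are arbitrary documentation values):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t csum_add(uint32_t sum, const uint8_t *p, size_t len)
    {
        while (len > 1) { sum += (p[0] << 8) | p[1]; p += 2; len -= 2; }
        if (len)
            sum += p[0] << 8;
        return sum;
    }

    static uint16_t csum_fold(uint32_t sum)
    {
        while (sum >> 16)
            sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
    }

    int main(void)
    {
        /* IPv4 pseudo-header: saddr, daddr, zero, proto 17 (UDP), UDP length.
         * This is what csum_tcpudp_magic() folded in; an IPv6 packet needs
         * 16-byte addresses, so this layout is simply wrong for it. */
        uint8_t pseudo[12] = { 192, 0, 2, 1, 192, 0, 2, 2, 0, 17, 0, 12 };
        /* UDP header (check field zeroed first, as uh->check = 0 did)
         * plus a 4-byte payload. */
        uint8_t udp[12] = { 0x04, 0xd2, 0x00, 0x35, 0x00, 0x0c, 0, 0,
                            'p', 'i', 'n', 'g' };

        uint16_t check = csum_fold(csum_add(csum_add(0, pseudo, 12), udp, 12));
        if (check == 0)
            check = 0xffff;   /* CSUM_MANGLED_0: 0 means "no checksum" for UDP */
        printf("udp check = 0x%04x\n", check);
        return 0;
    }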


@@ -607,6 +607,21 @@ void phy_start_machine(struct phy_device *phydev)
     queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, HZ);
 }
 
+/**
+ * phy_trigger_machine - trigger the state machine to run
+ *
+ * @phydev: the phy_device struct
+ *
+ * Description: There has been a change in state which requires that the
+ *   state machine runs.
+ */
+static void phy_trigger_machine(struct phy_device *phydev)
+{
+    cancel_delayed_work_sync(&phydev->state_queue);
+    queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
+}
+
 /**
  * phy_stop_machine - stop the PHY state machine tracking
  * @phydev: target phy_device struct
@@ -639,6 +654,8 @@ static void phy_error(struct phy_device *phydev)
     mutex_lock(&phydev->lock);
     phydev->state = PHY_HALTED;
     mutex_unlock(&phydev->lock);
+
+    phy_trigger_machine(phydev);
 }
 
 /**
@@ -800,8 +817,7 @@ void phy_change(struct work_struct *work)
     }
 
     /* reschedule state queue work to run as soon as possible */
-    cancel_delayed_work_sync(&phydev->state_queue);
-    queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
+    phy_trigger_machine(phydev);
     return;
 
 ignore:
@@ -890,6 +906,8 @@ void phy_start(struct phy_device *phydev)
     /* if phy was suspended, bring the physical link up again */
     if (do_resume)
         phy_resume(phydev);
+
+    phy_trigger_machine(phydev);
 }
 EXPORT_SYMBOL(phy_start);
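
The new helper packages one behavior: stop waiting for the next poll tick and run the state machine now, which phy_error(), phy_change() and phy_start() all want on a state change. A toy model of the difference (cancel_pending() and queue_now() are stand-ins for cancel_delayed_work_sync() and queue_delayed_work()):

    #include <stdio.h>

    static void cancel_pending(void) { /* drop the delayed poll, if queued */ }
    static void queue_now(void)      { printf("state machine runs now\n"); }

    static void trigger(void)   /* mirrors phy_trigger_machine() */
    {
        cancel_pending();
        queue_now();
    }

    int main(void)
    {
        /* e.g. phy_error(): enter PHY_HALTED and have it take effect
         * immediately instead of up to one poll interval later. */
        printf("-> PHY_HALTED\n");
        trigger();
        return 0;
    }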


@@ -59,6 +59,10 @@ enum qmi_wwan_flags {
     QMI_WWAN_FLAG_RAWIP = 1 << 0,
 };
 
+enum qmi_wwan_quirks {
+    QMI_WWAN_QUIRK_DTR = 1 << 0,   /* needs "set DTR" request */
+};
+
 static void qmi_wwan_netdev_setup(struct net_device *net)
 {
     struct usbnet *dev = netdev_priv(net);
@@ -411,9 +415,14 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
      * clearing out state the clients might need.
      *
      * MDM9x30 is the first QMI chipset with USB3 support. Abuse
-     * this fact to enable the quirk.
+     * this fact to enable the quirk for all USB3 devices.
+     *
+     * There are also chipsets with the same "set DTR" requirement
+     * but without USB3 support. Devices based on these chips
+     * need a quirk flag in the device ID table.
      */
-    if (le16_to_cpu(dev->udev->descriptor.bcdUSB) >= 0x0201) {
+    if (dev->driver_info->data & QMI_WWAN_QUIRK_DTR ||
+        le16_to_cpu(dev->udev->descriptor.bcdUSB) >= 0x0201) {
         qmi_wwan_manage_power(dev, 1);
         qmi_wwan_change_dtr(dev, true);
     }
@@ -526,6 +535,16 @@ static const struct driver_info qmi_wwan_info = {
     .rx_fixup = qmi_wwan_rx_fixup,
 };
 
+static const struct driver_info qmi_wwan_info_quirk_dtr = {
+    .description = "WWAN/QMI device",
+    .flags = FLAG_WWAN,
+    .bind = qmi_wwan_bind,
+    .unbind = qmi_wwan_unbind,
+    .manage_power = qmi_wwan_manage_power,
+    .rx_fixup = qmi_wwan_rx_fixup,
+    .data = QMI_WWAN_QUIRK_DTR,
+};
+
 #define HUAWEI_VENDOR_ID 0x12D1
 
 /* map QMI/wwan function by a fixed interface number */
@@ -533,6 +552,11 @@ static const struct driver_info qmi_wwan_info = {
     USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
     .driver_info = (unsigned long)&qmi_wwan_info
 
+/* devices requiring "set DTR" quirk */
+#define QMI_QUIRK_SET_DTR(vend, prod, num) \
+    USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \
+    .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr
+
 /* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */
 #define QMI_GOBI1K_DEVICE(vend, prod) \
     QMI_FIXED_INTF(vend, prod, 3)
@@ -895,6 +919,8 @@ static const struct usb_device_id products[] = {
     {QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},    /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
     {QMI_FIXED_INTF(0x22de, 0x9061, 3)},    /* WeTelecom WPD-600N */
     {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)},    /* SIMCom 7230E */
+    {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
+    {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
 
     /* 4. Gobi 1000 devices */
     {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},    /* Acer Gobi Modem Device */


@@ -407,4 +407,8 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
 
 void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);
 
+#ifdef CONFIG_DEBUG_FS
+void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m);
+#endif
+
 #endif /* __XEN_NETBACK__COMMON_H__ */


@@ -360,6 +360,74 @@ u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
     return XEN_NETIF_CTRL_STATUS_SUCCESS;
 }
 
+#ifdef CONFIG_DEBUG_FS
+void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m)
+{
+    unsigned int i;
+
+    switch (vif->hash.alg) {
+    case XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ:
+        seq_puts(m, "Hash Algorithm: TOEPLITZ\n");
+        break;
+
+    case XEN_NETIF_CTRL_HASH_ALGORITHM_NONE:
+        seq_puts(m, "Hash Algorithm: NONE\n");
+        /* FALLTHRU */
+    default:
+        return;
+    }
+
+    if (vif->hash.flags) {
+        seq_puts(m, "\nHash Flags:\n");
+
+        if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4)
+            seq_puts(m, "- IPv4\n");
+        if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP)
+            seq_puts(m, "- IPv4 + TCP\n");
+        if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6)
+            seq_puts(m, "- IPv6\n");
+        if (vif->hash.flags & XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP)
+            seq_puts(m, "- IPv6 + TCP\n");
+    }
+
+    seq_puts(m, "\nHash Key:\n");
+
+    for (i = 0; i < XEN_NETBK_MAX_HASH_KEY_SIZE; ) {
+        unsigned int j, n;
+
+        n = 8;
+        if (i + n >= XEN_NETBK_MAX_HASH_KEY_SIZE)
+            n = XEN_NETBK_MAX_HASH_KEY_SIZE - i;
+
+        seq_printf(m, "[%2u - %2u]: ", i, i + n - 1);
+
+        for (j = 0; j < n; j++, i++)
+            seq_printf(m, "%02x ", vif->hash.key[i]);
+
+        seq_puts(m, "\n");
+    }
+
+    if (vif->hash.size != 0) {
+        seq_puts(m, "\nHash Mapping:\n");
+
+        for (i = 0; i < vif->hash.size; ) {
+            unsigned int j, n;
+
+            n = 8;
+            if (i + n >= vif->hash.size)
+                n = vif->hash.size - i;
+
+            seq_printf(m, "[%4u - %4u]: ", i, i + n - 1);
+
+            for (j = 0; j < n; j++, i++)
+                seq_printf(m, "%4u ", vif->hash.mapping[i]);
+
+            seq_puts(m, "\n");
+        }
+    }
+}
+#endif /* CONFIG_DEBUG_FS */
+
 void xenvif_init_hash(struct xenvif *vif)
 {
     if (xenvif_hash_cache_size == 0)


@@ -337,9 +337,9 @@ static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
     frag_data += pkt->frag_offset;
     frag_len -= pkt->frag_offset;
 
-    chunk_len = min(frag_len, XEN_PAGE_SIZE - offset);
-    chunk_len = min(chunk_len,
-                    XEN_PAGE_SIZE - xen_offset_in_page(frag_data));
+    chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset);
+    chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE -
+                                         xen_offset_in_page(frag_data));
 
     pkt->frag_offset += chunk_len;
@@ -425,6 +425,8 @@ void xenvif_rx_skb(struct xenvif_queue *queue)
 
     xenvif_rx_next_skb(queue, &pkt);
 
+    queue->last_rx_time = jiffies;
+
     do {
         struct xen_netif_rx_request *req;
         struct xen_netif_rx_response *rsp;
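
min_t() is needed here because the kernel's min() refuses mixed operand types: frag_len is a size_t while the page arithmetic is not, and the pointer-comparison trick inside min() emits the type-mismatch warning this merge cures. A userspace rendition of both macros (simplified from include/linux/kernel.h; needs GNU C statement expressions):

    #include <stdio.h>

    #define min(a, b) ({                 \
        __typeof__(a) _a = (a);          \
        __typeof__(b) _b = (b);          \
        (void)(&_a == &_b);  /* warns when a and b differ in type */ \
        _a < _b ? _a : _b; })

    #define min_t(type, a, b) ({         \
        type _ta = (type)(a);            \
        type _tb = (type)(b);            \
        _ta < _tb ? _ta : _tb; })

    int main(void)
    {
        size_t frag_len = 4096;
        unsigned int offset = 100;

        /* min(frag_len, 4096u - offset) mixes size_t with unsigned int and
         * draws "comparison of distinct pointer types"; casting both sides
         * through min_t() is the warning-free form used in the hunk. */
        size_t chunk = min_t(size_t, frag_len, 4096u - offset);
        printf("chunk = %zu\n", chunk);
        return 0;
    }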


@@ -165,7 +165,7 @@ xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
     return count;
 }
 
-static int xenvif_dump_open(struct inode *inode, struct file *filp)
+static int xenvif_io_ring_open(struct inode *inode, struct file *filp)
 {
     int ret;
     void *queue = NULL;
@@ -179,13 +179,35 @@ static int xenvif_dump_open(struct inode *inode, struct file *filp)
 
 static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
     .owner = THIS_MODULE,
-    .open = xenvif_dump_open,
+    .open = xenvif_io_ring_open,
     .read = seq_read,
     .llseek = seq_lseek,
     .release = single_release,
     .write = xenvif_write_io_ring,
 };
 
+static int xenvif_read_ctrl(struct seq_file *m, void *v)
+{
+    struct xenvif *vif = m->private;
+
+    xenvif_dump_hash_info(vif, m);
+
+    return 0;
+}
+
+static int xenvif_ctrl_open(struct inode *inode, struct file *filp)
+{
+    return single_open(filp, xenvif_read_ctrl, inode->i_private);
+}
+
+static const struct file_operations xenvif_dbg_ctrl_ops_fops = {
+    .owner = THIS_MODULE,
+    .open = xenvif_ctrl_open,
+    .read = seq_read,
+    .llseek = seq_lseek,
+    .release = single_release,
+};
+
 static void xenvif_debugfs_addif(struct xenvif *vif)
 {
     struct dentry *pfile;
@@ -210,6 +232,17 @@ static void xenvif_debugfs_addif(struct xenvif *vif)
                 pr_warn("Creation of io_ring file returned %ld!\n",
                         PTR_ERR(pfile));
         }
+
+        if (vif->ctrl_irq) {
+            pfile = debugfs_create_file("ctrl",
+                                        S_IRUSR,
+                                        vif->xenvif_dbg_root,
+                                        vif,
+                                        &xenvif_dbg_ctrl_ops_fops);
+            if (IS_ERR_OR_NULL(pfile))
+                pr_warn("Creation of ctrl file returned %ld!\n",
+                        PTR_ERR(pfile));
+        }
     } else
         netdev_warn(vif->dev,
                     "Creation of vif debugfs dir returned %ld!\n",


@@ -193,6 +193,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
         if (err)
             break;
 
+        memset(&precise_offset, 0, sizeof(precise_offset));
         ts = ktime_to_timespec64(xtstamp.device);
         precise_offset.device.sec = ts.tv_sec;
         precise_offset.device.nsec = ts.tv_nsec;


@@ -92,12 +92,21 @@ __mlx5_mask(typ, fld))
 ___t; \
 })
 
-#define MLX5_SET64(typ, p, fld, v) do { \
+#define __MLX5_SET64(typ, p, fld, v) do { \
     BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
-    BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
     *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
 } while (0)
 
+#define MLX5_SET64(typ, p, fld, v) do { \
+    BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
+    __MLX5_SET64(typ, p, fld, v); \
+} while (0)
+
+#define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
+    BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
+    __MLX5_SET64(typ, p, fld[idx], v); \
+} while (0)
+
 #define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))
 
 #define MLX5_GET64_PR(typ, p, fld) ({ \
View file

@@ -114,6 +114,25 @@ static inline u32 l3mdev_fib_table(const struct net_device *dev)
     return tb_id;
 }
 
+static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
+{
+    struct net_device *dev;
+    bool rc = false;
+
+    if (ifindex == 0)
+        return false;
+
+    rcu_read_lock();
+
+    dev = dev_get_by_index_rcu(net, ifindex);
+    if (dev)
+        rc = netif_is_l3_master(dev);
+
+    rcu_read_unlock();
+
+    return rc;
+}
+
 struct dst_entry *l3mdev_link_scope_lookup(struct net *net, struct flowi6 *fl6);
 
 static inline
@@ -207,6 +226,11 @@ static inline u32 l3mdev_fib_table_by_index(struct net *net, int ifindex)
     return 0;
 }
 
+static inline bool netif_index_is_l3_master(struct net *net, int ifindex)
+{
+    return false;
+}
+
 static inline
 struct dst_entry *l3mdev_link_scope_lookup(struct net *net, struct flowi6 *fl6)
 {
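
Note the helper is defined twice, once per side of the CONFIG_NET_L3_MASTER_DEV #ifdef: a real RCU device lookup when l3mdev support is built in, and a false-returning stub otherwise, so the route.c caller below stays free of preprocessor guards. The idiom in miniature (userspace; the predicate body is a made-up stand-in for the device lookup):

    #include <stdbool.h>
    #include <stdio.h>

    #define CONFIG_NET_L3_MASTER_DEV 1   /* comment out to build the stub */

    #ifdef CONFIG_NET_L3_MASTER_DEV
    static bool netif_index_is_l3_master(int ifindex)
    {
        return ifindex == 4;   /* pretend ifindex 4 is a VRF master */
    }
    #else
    static bool netif_index_is_l3_master(int ifindex)
    {
        (void)ifindex;
        return false;          /* option off: nothing is an l3 master */
    }
    #endif

    int main(void)
    {
        printf("ifindex 4 is l3 master: %d\n", netif_index_is_l3_master(4));
        return 0;
    }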


@@ -217,6 +217,7 @@ static const struct brport_attribute *brport_attrs[] = {
 #endif
     &brport_attr_proxyarp,
     &brport_attr_proxyarp_wifi,
+    &brport_attr_multicast_flood,
     NULL
 };


@@ -1144,6 +1144,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
     if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
         return 0;
 
+    memset(&vf_vlan_info, 0, sizeof(vf_vlan_info));
+
     vf_mac.vf =
         vf_vlan.vf =
         vf_vlan_info.vf =


@@ -2265,7 +2265,8 @@ struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
     if (err) {
         res.fi = NULL;
         res.table = NULL;
-        if (fl4->flowi4_oif) {
+        if (fl4->flowi4_oif &&
+            !netif_index_is_l3_master(net, fl4->flowi4_oif)) {
             /* Apparently, routing tables are wrong. Assume,
                that the destination is on link.


@@ -1190,6 +1190,16 @@ out:
     return NULL;
 }
 
+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+    /* We need to move header back to the beginning if xfrm6_policy_check()
+     * and tcp_v6_fill_cb() are going to be called again.
+     * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
+     */
+    memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+            sizeof(struct inet6_skb_parm));
+}
+
 /* The socket must have it's spinlock held when we get
  * here, unless it is a TCP_LISTEN socket.
  *
@@ -1319,6 +1329,7 @@ ipv6_pktoptions:
             np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
         if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
             skb_set_owner_r(opt_skb, sk);
+            tcp_v6_restore_cb(opt_skb);
             opt_skb = xchg(&np->pktoptions, opt_skb);
         } else {
             __kfree_skb(opt_skb);
@@ -1352,15 +1363,6 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
     TCP_SKB_CB(skb)->sacked = 0;
 }
 
-static void tcp_v6_restore_cb(struct sk_buff *skb)
-{
-    /* We need to move header back to the beginning if xfrm6_policy_check()
-     * and tcp_v6_fill_cb() are going to be called again.
-     */
-    memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
-            sizeof(struct inet6_skb_parm));
-}
-
 static int tcp_v6_rcv(struct sk_buff *skb)
 {
     const struct tcphdr *th;
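
Moving the function above its new caller is mechanical; the substance is that skb->cb is one shared scratch area, and TCP's view of it stashes the IPv6 control block at TCP_SKB_CB(skb)->header.h6, so code that later reads IP6CB(skb) needs those bytes moved back to the front. A userspace sketch of that overlay-and-restore (struct layouts are illustrative, not the kernel's):

    #include <stdio.h>
    #include <string.h>

    struct inet6_parm { int iif; int flags; };              /* fake IP6CB layout */
    struct tcp_cb { char pad[8]; struct inet6_parm h6; };   /* fake TCP_SKB_CB */

    int main(void)
    {
        _Alignas(long) char cb[48] = { 0 };   /* stand-in for skb->cb */

        /* TCP's overlay keeps the IPv6 metadata at a different offset... */
        struct tcp_cb *tcb = (struct tcp_cb *)cb;
        tcb->h6 = (struct inet6_parm){ .iif = 7, .flags = 1 };

        /* ...and restore_cb memmoves it back to offset 0, where IP6CB()
         * and ip6_datagram_recv_specific_ctl() expect it (memmove, since
         * source and destination can overlap inside the real 48-byte cb). */
        memmove(cb, &tcb->h6, sizeof(struct inet6_parm));

        struct inet6_parm *ip6cb = (struct inet6_parm *)cb;
        printf("iif=%d flags=%d\n", ip6cb->iif, ip6cb->flags);
        return 0;
    }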


@@ -343,7 +343,7 @@ static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key)
     key->eth.cvlan.tci = 0;
     key->eth.cvlan.tpid = 0;
 
-    if (likely(skb_vlan_tag_present(skb))) {
+    if (skb_vlan_tag_present(skb)) {
         key->eth.vlan.tci = htons(skb->vlan_tci);
         key->eth.vlan.tpid = skb->vlan_proto;
     } else {


@@ -176,7 +176,7 @@ static void do_setup(struct net_device *netdev)
 
     netdev->vlan_features = netdev->features;
     netdev->hw_enc_features = netdev->features;
-    netdev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+    netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
     netdev->hw_features = netdev->features & ~NETIF_F_LLTX;
 
     eth_hw_addr_random(netdev);


@@ -485,7 +485,8 @@ static unsigned int packet_length(const struct sk_buff *skb)
 {
     unsigned int length = skb->len - ETH_HLEN;
 
-    if (skb_vlan_tagged(skb))
+    if (!skb_vlan_tag_present(skb) &&
+        eth_type_vlan(skb->protocol))
         length -= VLAN_HLEN;
 
     /* Don't subtract for multiple VLAN tags. Most (all?) drivers allow


@@ -341,22 +341,25 @@ int tcf_register_action(struct tc_action_ops *act,
     if (!act->act || !act->dump || !act->init || !act->walk || !act->lookup)
         return -EINVAL;
 
+    /* We have to register pernet ops before making the action ops visible,
+     * otherwise tcf_action_init_1() could get a partially initialized
+     * netns.
+     */
+    ret = register_pernet_subsys(ops);
+    if (ret)
+        return ret;
+
     write_lock(&act_mod_lock);
     list_for_each_entry(a, &act_base, head) {
         if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
             write_unlock(&act_mod_lock);
+            unregister_pernet_subsys(ops);
             return -EEXIST;
         }
     }
     list_add_tail(&act->head, &act_base);
     write_unlock(&act_mod_lock);
 
-    ret = register_pernet_subsys(ops);
-    if (ret) {
-        tcf_unregister_action(act, ops);
-        return ret;
-    }
-
     return 0;
 }
 EXPORT_SYMBOL(tcf_register_action);
@@ -367,8 +370,6 @@ int tcf_unregister_action(struct tc_action_ops *act,
     struct tc_action_ops *a;
     int err = -ENOENT;
 
-    unregister_pernet_subsys(ops);
-
     write_lock(&act_mod_lock);
     list_for_each_entry(a, &act_base, head) {
         if (a == act) {
@@ -378,6 +379,8 @@ int tcf_unregister_action(struct tc_action_ops *act,
         }
     }
     write_unlock(&act_mod_lock);
+    if (!err)
+        unregister_pernet_subsys(ops);
 
     return err;
 }
 EXPORT_SYMBOL(tcf_unregister_action);
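
The reorder enforces the usual init rule: backing state must exist before the object becomes globally visible, and teardown hides the object before freeing that state; otherwise tcf_action_init_1() can find an action whose per-netns data is not ready. Skeleton of the ordering (userspace toy; the four helpers stand in for register_pernet_subsys()/unregister_pernet_subsys() and the act_base list updates):

    #include <stdio.h>

    static int pernet_ready, published;

    static int  pernet_register(void)   { pernet_ready = 1; return 0; }
    static void pernet_unregister(void) { pernet_ready = 0; }
    static void publish(void)           { published = 1; }
    static void unpublish(void)         { published = 0; }

    static int register_action(void)
    {
        int ret = pernet_register();   /* 1: per-netns state first */
        if (ret)
            return ret;
        publish();                     /* 2: only then become visible */
        return 0;
    }

    static void unregister_action(void)
    {
        unpublish();                   /* 1: hide first */
        pernet_unregister();           /* 2: then release the state */
    }

    int main(void)
    {
        register_action();
        printf("visible with state ready: %d\n", published && pernet_ready);
        unregister_action();
        return 0;
    }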


@@ -101,7 +101,7 @@ EXPORT_SYMBOL(unregister_tcf_proto_ops);
 
 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
                           struct nlmsghdr *n, struct tcf_proto *tp,
-                          unsigned long fh, int event);
+                          unsigned long fh, int event, bool unicast);
 
 static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
                                  struct nlmsghdr *n,
@@ -112,7 +112,7 @@ static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
 
     for (it_chain = chain; (tp = rtnl_dereference(*it_chain)) != NULL;
          it_chain = &tp->next)
-        tfilter_notify(net, oskb, n, tp, 0, event);
+        tfilter_notify(net, oskb, n, tp, 0, event, false);
 }
 
 /* Select new prio value from the range, managed by kernel. */
@@ -319,7 +319,8 @@ replay:
             RCU_INIT_POINTER(*back, next);
 
-            tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
+            tfilter_notify(net, skb, n, tp, fh,
+                           RTM_DELTFILTER, false);
             tcf_destroy(tp, true);
             err = 0;
             goto errout;
@@ -345,14 +346,14 @@ replay:
             struct tcf_proto *next = rtnl_dereference(tp->next);
 
             tfilter_notify(net, skb, n, tp, fh,
-                           RTM_DELTFILTER);
+                           RTM_DELTFILTER, false);
             if (tcf_destroy(tp, false))
                 RCU_INIT_POINTER(*back, next);
         }
         goto errout;
     case RTM_GETTFILTER:
         err = tfilter_notify(net, skb, n, tp, fh,
-                             RTM_NEWTFILTER);
+                             RTM_NEWTFILTER, true);
         goto errout;
     default:
         err = -EINVAL;
@@ -367,7 +368,7 @@ replay:
         RCU_INIT_POINTER(tp->next, rtnl_dereference(*back));
         rcu_assign_pointer(*back, tp);
     }
-    tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
+    tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER, false);
 } else {
     if (tp_created)
         tcf_destroy(tp, true);
@@ -419,7 +420,7 @@ nla_put_failure:
 
 static int tfilter_notify(struct net *net, struct sk_buff *oskb,
                           struct nlmsghdr *n, struct tcf_proto *tp,
-                          unsigned long fh, int event)
+                          unsigned long fh, int event, bool unicast)
 {
     struct sk_buff *skb;
     u32 portid = oskb ? NETLINK_CB(oskb).portid : 0;
@@ -433,6 +434,9 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
         return -EINVAL;
     }
 
+    if (unicast)
+        return netlink_unicast(net->rtnl, skb, portid, MSG_DONTWAIT);
+
     return rtnetlink_send(skb, net, portid, RTNLGRP_TC,
                           n->nlmsg_flags & NLM_F_ECHO);
 }
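
The new bool is the whole of the RTM_GETTFILTER fix from the shortlog: a GET reply now goes back only to the requesting socket's portid, while add/delete notifications stay multicast to RTNLGRP_TC. The decision in miniature (userspace toy; send_unicast() and send_multicast() stand in for netlink_unicast() and rtnetlink_send()):

    #include <stdbool.h>
    #include <stdio.h>

    static void send_unicast(int portid) { printf("reply -> portid %d only\n", portid); }
    static void send_multicast(void)     { printf("notify -> all RTNLGRP_TC listeners\n"); }

    static void tfilter_notify(int portid, bool unicast)
    {
        if (unicast)
            send_unicast(portid);   /* RTM_GETTFILTER answers */
        else
            send_multicast();       /* RTM_NEWTFILTER/RTM_DELTFILTER events */
    }

    int main(void)
    {
        tfilter_notify(1234, true);    /* GET: requester only */
        tfilter_notify(1234, false);   /* change event: everyone */
        return 0;
    }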


@@ -246,7 +246,7 @@ static int strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
             } else {
                 strp->rx_interrupted = 1;
             }
-            strp_parser_err(strp, err, desc);
+            strp_parser_err(strp, len, desc);
             break;
         } else if (len > strp->sk->sk_rcvbuf) {
             /* Message length exceeds maximum allowed */
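
The one-word change propagates the right value: on this path the parser callback's negative return code is sitting in len, while err was never assigned, so callers previously saw a bogus error. The shape of the bug in a standalone sketch (names are stand-ins, not the strparser API):

    #include <errno.h>
    #include <stdio.h>

    static int parse_msg(void) { return -EMSGSIZE; }   /* parser rejects frame */
    static void report_err(int code) { printf("parser error: %d\n", code); }

    int main(void)
    {
        int err = 0;              /* never assigned on this path */
        int len = parse_msg();    /* the negative return carries the error */

        if (len < 0) {
            report_err(err);      /* buggy shape: reports 0 */
            report_err(len);      /* fixed shape: reports -EMSGSIZE */
        }
        return 0;
    }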


@@ -407,6 +407,7 @@ static int __tipc_nl_add_udp_addr(struct sk_buff *skb,
     if (ntohs(addr->proto) == ETH_P_IP) {
         struct sockaddr_in ip4;
 
+        memset(&ip4, 0, sizeof(ip4));
         ip4.sin_family = AF_INET;
         ip4.sin_port = addr->port;
         ip4.sin_addr.s_addr = addr->ipv4.s_addr;
@@ -417,6 +418,7 @@ static int __tipc_nl_add_udp_addr(struct sk_buff *skb,
     } else if (ntohs(addr->proto) == ETH_P_IPV6) {
         struct sockaddr_in6 ip6;
 
+        memset(&ip6, 0, sizeof(ip6));
         ip6.sin6_family = AF_INET6;
         ip6.sin6_port = addr->port;
         memcpy(&ip6.sin6_addr, &addr->ipv6, sizeof(struct in6_addr));
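
Both memset() calls close the same info leak: struct sockaddr_in and sockaddr_in6 contain padding (sin_zero and implicit holes) that an on-stack variable leaves holding stale stack bytes, and copying the whole struct into a netlink attribute ships those bytes to userspace. The pattern, standalone:

    #include <netinet/in.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        struct sockaddr_in ip4;

        memset(&ip4, 0, sizeof(ip4));   /* the added line: zero padding too */
        ip4.sin_family = AF_INET;
        ip4.sin_port = htons(6118);     /* TIPC's default UDP port, for flavor */
        ip4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

        /* Every byte, including sin_zero, is now deterministic, so copying
         * sizeof(ip4) bytes (as nla_put() does) leaks no stack garbage. */
        for (size_t i = 0; i < sizeof(ip4); i++)
            printf("%02x", ((unsigned char *)&ip4)[i]);
        printf("\n");
        return 0;
    }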