Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

 1) sadb_msg prepared for IPSEC userspace forgets to initialize the
    satype field, fix from Nicolas Dichtel.

 2) Fix mac80211 synchronization during station removal, from Johannes
    Berg.

 3) Fix IPSEC sequence number notifications when they wrap, from Steffen
    Klassert.

 4) Fix cfg80211 wdev tracing crashes when add_virtual_intf() returns an
    error pointer, from Johannes Berg.

 5) In mac80211, don't call into the channel context code with the
    interface list mutex held.  From Johannes Berg.

 6) In mac80211, if we don't actually associate, do not restart the STA
    timer, otherwise we can crash.  From Ben Greear.

 7) Missing dma_mapping_error() check in e1000, ixgb, and e1000e.  From
    Christoph Paasch.
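
    The shape of the added check, as it appears in the ixgb/e1000e RX
    refill hunks further down (buffer_info/adapter are the drivers' own
    structures):

        buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
                                          adapter->rx_buffer_len,
                                          DMA_FROM_DEVICE);
        if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
                /* count the failure and bail out of the refill loop
                 * instead of handing a bogus DMA address to the NIC
                 */
                adapter->alloc_rx_buff_failed++;
                break;
        }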

 8) Fix sja1000 driver defines to not conflict with SH port, from Marc
    Kleine-Budde.

 9) Don't call il4965_rs_use_green with a NULL station, from Colin Ian
    King.

10) Suspend/resume in the FEC driver fails because the buffer descriptors
    are not reinitialized at all the points where they should be.  Fix
    from Frank Li.

11) cpsw and davinci_emac drivers both use the wrong interface to
    restart a stopped TX queue.  Use netif_wake_queue not
    netif_start_queue, the latter is for initialization/bringup not
    active management of the queue.  From Mugunthan V N.
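
    The pattern, as it appears in the cpsw/davinci_emac TX completion
    hunks further down:

        /* Queue was stopped because we ran out of TX descriptors;
         * now that some were freed, wake (not start) the queue.
         */
        if (unlikely(netif_queue_stopped(ndev)))
                netif_wake_queue(ndev);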

12) Fix regression in rate calculations done by
    psched_ratecfg_precompute(), caused by a missing u64 type promotion.
    From Sergey Popovich.
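
    The bug class, as a hedged sketch (illustrative variables only, not
    the actual net/sched code): a 32-bit rate is shifted before being
    widened, so the intermediate result can overflow:

        u32 rate;                       /* bytes per second */
        u64 rate_bps;

        rate_bps = rate << 3;           /* 32-bit shift, may overflow */
        rate_bps = (u64)rate << 3;      /* promote first, then shift */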

13) Fix length overflow in tg3 VPD parsing, from Kees Cook.
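
    The fix (see the tg3 hunk below) clamps the length to the destination
    buffer and replaces strncat() with a bounded snprintf():

        if (len >= sizeof(tp->fw_ver))
                len = sizeof(tp->fw_ver) - 1;
        memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
        snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
                 &vpd_data[j]);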

14) AOE driver fails to allocate enough headroom, resulting in crashes.
    Fix from Eric Dumazet.
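
    The fix (see the aoe hunk below) allocates MAX_HEADER extra bytes and
    reserves them as headroom before building the frame:

        skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
        if (skb)
                skb_reserve(skb, MAX_HEADER);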

15) RX overflow happens too quickly in sky2 driver because pause packet
    thresholds are not programmed correctly.  From Mirko Lindner.

16) Bonding driver manages arp_interval and miimon settings incorrectly:
    disabling one unintentionally disables both.  Fix from Nikolay
    Aleksandrov.

17) smsc75xx driver doesn't program the RX mac properly for jumbo frames.
    Fix from Steve Glendinning.

18) Fix off-by-one in the fq_codel packet scheduler.  From Vijay
    Subramanian.

19) Fix packet corruption in atl1e by disabling MSI support, from Hannes
    Frederic Sowa.

20) netdev_rx_handler_unregister() needs a synchronize_net() to fix
    crashes in bonding driver unload stress tests.  From Eric Dumazet.
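
    The ordering requirement, as spelled out in the net/core/dev.c hunk
    below:

        RCU_INIT_POINTER(dev->rx_handler, NULL);
        /* a reader seeing a non NULL rx_handler in a rcu_read_lock()
         * section has a guarantee to see a non NULL rx_handler_data
         * as well.
         */
        synchronize_net();
        RCU_INIT_POINTER(dev->rx_handler_data, NULL);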

21) rxlen field of ks8851 RX packet descriptors not interpreted
    correctly (it is 12 bits not 16 bits, so needs to be masked after
    shifting the 32-bit value down 16 bits).  Fix from Max Nekludov.
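
    i.e. (from the ks8851 hunk below):

        rxlen = (rxh >> 16) & 0xfff;    /* length is a 12-bit field */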

22) Fix missed RX/TX enable in sh_eth driver due to mishandling of link
    change indications.  From Sergei Shtylyov.

23) Fix crashes during spurious ECI interrupts in sh_eth driver, also
    from Sergei Shtylyov.

24) dm9000 driver initialization is done wrong for revision B devices
    with DSP PHY, from Joseph CHANG.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (53 commits)
  DM9000B: driver initialization upgrade
  sh_eth: make 'link' field of 'struct sh_eth_private' *int*
  sh_eth: workaround for spurious ECI interrupt
  sh_eth: fix handling of no LINK signal
  ks8851: Fix interpretation of rxlen field.
  net: add a synchronize_net() in netdev_rx_handler_unregister()
  MAINTAINERS: Update netxen_nic maintainers list
  atl1e: drop pci-msi support because of packet corruption
  net: fq_codel: Fix off-by-one error
  net: calxedaxgmac: Wake-on-LAN fixes
  net: calxedaxgmac: fix rx ring handling when OOM
  net: core: Remove redundant call to 'nf_reset' in 'dev_forward_skb'
  smsc75xx: fix jumbo frame support
  net: fix the use of this_cpu_ptr
  bonding: fix disabling of arp_interval and miimon
  ipv6: don't accept node local multicast traffic from the wire
  sky2: Threshold for Pause Packet is set wrong
  sky2: Receive Overflows not counted
  aoe: reserve enough headroom on skbs
  line up comment for ndo_bridge_getlink
  ...
Merge commit ff3421dee6 by Linus Torvalds, 2013-04-01 08:06:30 -07:00
57 changed files with 803 additions and 621 deletions


@ -5065,9 +5065,8 @@ S: Maintained
F: drivers/net/ethernet/marvell/sk*
MARVELL LIBERTAS WIRELESS DRIVER
M: Dan Williams <dcbw@redhat.com>
L: libertas-dev@lists.infradead.org
S: Maintained
S: Orphan
F: drivers/net/wireless/libertas/
MARVELL MV643XX ETHERNET DRIVER
@ -5569,6 +5568,7 @@ F: include/uapi/linux/if_*
F: include/uapi/linux/netdevice.h
NETXEN (1/10) GbE SUPPORT
M: Manish Chopra <manish.chopra@qlogic.com>
M: Sony Chacko <sony.chacko@qlogic.com>
M: Rajesh Borundia <rajesh.borundia@qlogic.com>
L: netdev@vger.kernel.org


@ -51,8 +51,9 @@ new_skb(ulong len)
{
struct sk_buff *skb;
skb = alloc_skb(len, GFP_ATOMIC);
skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
if (skb) {
skb_reserve(skb, MAX_HEADER);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb->protocol = __constant_htons(ETH_P_AOE);


@ -527,7 +527,7 @@ static ssize_t bonding_store_arp_interval(struct device *d,
goto out;
}
if (new_value < 0) {
pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n",
bond->dev->name, new_value, INT_MAX);
ret = -EINVAL;
goto out;
@ -542,14 +542,15 @@ static ssize_t bonding_store_arp_interval(struct device *d,
pr_info("%s: Setting ARP monitoring interval to %d.\n",
bond->dev->name, new_value);
bond->params.arp_interval = new_value;
if (bond->params.miimon) {
pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
bond->dev->name, bond->dev->name);
bond->params.miimon = 0;
}
if (!bond->params.arp_targets[0]) {
pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
bond->dev->name);
if (new_value) {
if (bond->params.miimon) {
pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
bond->dev->name, bond->dev->name);
bond->params.miimon = 0;
}
if (!bond->params.arp_targets[0])
pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
bond->dev->name);
}
if (bond->dev->flags & IFF_UP) {
/* If the interface is up, we may need to fire off
@ -557,10 +558,13 @@ static ssize_t bonding_store_arp_interval(struct device *d,
* timer will get fired off when the open function
* is called.
*/
cancel_delayed_work_sync(&bond->mii_work);
queue_delayed_work(bond->wq, &bond->arp_work, 0);
if (!new_value) {
cancel_delayed_work_sync(&bond->arp_work);
} else {
cancel_delayed_work_sync(&bond->mii_work);
queue_delayed_work(bond->wq, &bond->arp_work, 0);
}
}
out:
rtnl_unlock();
return ret;
@ -702,7 +706,7 @@ static ssize_t bonding_store_downdelay(struct device *d,
}
if (new_value < 0) {
pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
bond->dev->name, new_value, 1, INT_MAX);
bond->dev->name, new_value, 0, INT_MAX);
ret = -EINVAL;
goto out;
} else {
@ -757,8 +761,8 @@ static ssize_t bonding_store_updelay(struct device *d,
goto out;
}
if (new_value < 0) {
pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
bond->dev->name, new_value, 1, INT_MAX);
pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n",
bond->dev->name, new_value, 0, INT_MAX);
ret = -EINVAL;
goto out;
} else {
@ -968,37 +972,37 @@ static ssize_t bonding_store_miimon(struct device *d,
}
if (new_value < 0) {
pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
bond->dev->name, new_value, 1, INT_MAX);
bond->dev->name, new_value, 0, INT_MAX);
ret = -EINVAL;
goto out;
} else {
pr_info("%s: Setting MII monitoring interval to %d.\n",
bond->dev->name, new_value);
bond->params.miimon = new_value;
if (bond->params.updelay)
pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
bond->dev->name,
bond->params.updelay * bond->params.miimon);
if (bond->params.downdelay)
pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
bond->dev->name,
bond->params.downdelay * bond->params.miimon);
if (bond->params.arp_interval) {
pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
bond->dev->name);
bond->params.arp_interval = 0;
if (bond->params.arp_validate) {
bond->params.arp_validate =
BOND_ARP_VALIDATE_NONE;
}
}
if (bond->dev->flags & IFF_UP) {
/* If the interface is up, we may need to fire off
* the MII timer. If the interface is down, the
* timer will get fired off when the open function
* is called.
*/
}
pr_info("%s: Setting MII monitoring interval to %d.\n",
bond->dev->name, new_value);
bond->params.miimon = new_value;
if (bond->params.updelay)
pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
bond->dev->name,
bond->params.updelay * bond->params.miimon);
if (bond->params.downdelay)
pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
bond->dev->name,
bond->params.downdelay * bond->params.miimon);
if (new_value && bond->params.arp_interval) {
pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
bond->dev->name);
bond->params.arp_interval = 0;
if (bond->params.arp_validate)
bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
}
if (bond->dev->flags & IFF_UP) {
/* If the interface is up, we may need to fire off
* the MII timer. If the interface is down, the
* timer will get fired off when the open function
* is called.
*/
if (!new_value) {
cancel_delayed_work_sync(&bond->mii_work);
} else {
cancel_delayed_work_sync(&bond->arp_work);
queue_delayed_work(bond->wq, &bond->mii_work, 0);
}


@ -46,6 +46,7 @@ config CAN_EMS_PCI
config CAN_PEAK_PCMCIA
tristate "PEAK PCAN-PC Card"
depends on PCMCIA
depends on HAS_IOPORT
---help---
This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels)
from PEAK-System (http://www.peak-system.com). To compile this


@ -348,7 +348,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
*/
if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
REG_CR_BASICCAN_INITIAL &&
(priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) &&
(priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_BASICCAN_INITIAL) &&
(priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL))
flag = 1;
@ -360,7 +360,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
* See states on p. 23 of the Datasheet.
*/
if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL &&
priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL &&
priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_PELICAN_INITIAL &&
priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL)
return flag;


@ -92,7 +92,7 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
*/
spin_lock_irqsave(&priv->cmdreg_lock, flags);
priv->write_reg(priv, REG_CMR, val);
priv->read_reg(priv, REG_SR);
priv->read_reg(priv, SJA1000_REG_SR);
spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
}
@ -502,7 +502,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
n++;
status = priv->read_reg(priv, REG_SR);
status = priv->read_reg(priv, SJA1000_REG_SR);
/* check for absent controller due to hw unplug */
if (status == 0xFF && sja1000_is_absent(priv))
return IRQ_NONE;
@ -530,7 +530,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
/* receive interrupt */
while (status & SR_RBS) {
sja1000_rx(dev);
status = priv->read_reg(priv, REG_SR);
status = priv->read_reg(priv, SJA1000_REG_SR);
/* check for absent controller */
if (status == 0xFF && sja1000_is_absent(priv))
return IRQ_NONE;


@ -56,7 +56,7 @@
/* SJA1000 registers - manual section 6.4 (Pelican Mode) */
#define REG_MOD 0x00
#define REG_CMR 0x01
#define REG_SR 0x02
#define SJA1000_REG_SR 0x02
#define REG_IR 0x03
#define REG_IER 0x04
#define REG_ALC 0x0B


@ -438,7 +438,6 @@ struct atl1e_adapter {
struct atl1e_hw hw;
struct atl1e_hw_stats hw_stats;
bool have_msi;
u32 wol;
u16 link_speed;
u16 link_duplex;


@ -1849,34 +1849,19 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter)
struct net_device *netdev = adapter->netdev;
free_irq(adapter->pdev->irq, netdev);
if (adapter->have_msi)
pci_disable_msi(adapter->pdev);
}
static int atl1e_request_irq(struct atl1e_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
int flags = 0;
int err = 0;
adapter->have_msi = true;
err = pci_enable_msi(pdev);
if (err) {
netdev_dbg(netdev,
"Unable to allocate MSI interrupt Error: %d\n", err);
adapter->have_msi = false;
}
if (!adapter->have_msi)
flags |= IRQF_SHARED;
err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev);
err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name,
netdev);
if (err) {
netdev_dbg(adapter->netdev,
"Unable to allocate interrupt Error: %d\n", err);
if (adapter->have_msi)
pci_disable_msi(pdev);
return err;
}
netdev_dbg(netdev, "atl1e_request_irq OK\n");


@ -14604,8 +14604,11 @@ static void tg3_read_vpd(struct tg3 *tp)
if (j + len > block_end)
goto partno;
memcpy(tp->fw_ver, &vpd_data[j], len);
strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
if (len >= sizeof(tp->fw_ver))
len = sizeof(tp->fw_ver) - 1;
memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
&vpd_data[j]);
}
partno:


@ -163,6 +163,7 @@
#define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
/* XGMAC_INT_STAT reg */
#define XGMAC_INT_STAT_PMTIM 0x00800000 /* PMT Interrupt Mask */
#define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */
#define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */
@ -960,6 +961,9 @@ static int xgmac_hw_init(struct net_device *dev)
writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
/* Mask power mgt interrupt */
writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
/* XGMAC requires AXI bus init. This is a 'magic number' for now */
writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);
@ -1141,6 +1145,9 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
struct sk_buff *skb;
int frame_len;
if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ))
break;
entry = priv->rx_tail;
p = priv->dma_rx + entry;
if (desc_get_owner(p))
@ -1825,7 +1832,7 @@ static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
unsigned int pmt = 0;
if (mode & WAKE_MAGIC)
pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;
if (mode & WAKE_UCAST)
pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;


@ -257,6 +257,107 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
tmp = readl(reg);
}
/*
* Sleep, either by using msleep() or if we are suspending, then
* use mdelay() to sleep.
*/
static void dm9000_msleep(board_info_t *db, unsigned int ms)
{
if (db->in_suspend)
mdelay(ms);
else
msleep(ms);
}
/* Read a word from phyxcer */
static int
dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
{
board_info_t *db = netdev_priv(dev);
unsigned long flags;
unsigned int reg_save;
int ret;
mutex_lock(&db->addr_lock);
spin_lock_irqsave(&db->lock, flags);
/* Save previous register address */
reg_save = readb(db->io_addr);
/* Fill the phyxcer register into REG_0C */
iow(db, DM9000_EPAR, DM9000_PHY | reg);
/* Issue phyxcer read command */
iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
dm9000_msleep(db, 1); /* Wait read complete */
spin_lock_irqsave(&db->lock, flags);
reg_save = readb(db->io_addr);
iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
/* The read data keeps on REG_0D & REG_0E */
ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
/* restore the previous address */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
mutex_unlock(&db->addr_lock);
dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
return ret;
}
/* Write a word to phyxcer */
static void
dm9000_phy_write(struct net_device *dev,
int phyaddr_unused, int reg, int value)
{
board_info_t *db = netdev_priv(dev);
unsigned long flags;
unsigned long reg_save;
dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
mutex_lock(&db->addr_lock);
spin_lock_irqsave(&db->lock, flags);
/* Save previous register address */
reg_save = readb(db->io_addr);
/* Fill the phyxcer register into REG_0C */
iow(db, DM9000_EPAR, DM9000_PHY | reg);
/* Fill the written data into REG_0D & REG_0E */
iow(db, DM9000_EPDRL, value);
iow(db, DM9000_EPDRH, value >> 8);
/* Issue phyxcer write command */
iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
dm9000_msleep(db, 1); /* Wait write complete */
spin_lock_irqsave(&db->lock, flags);
reg_save = readb(db->io_addr);
iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
/* restore the previous address */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
mutex_unlock(&db->addr_lock);
}
/* dm9000_set_io
*
* select the specified set of io routines to use with the
@ -795,6 +896,9 @@ dm9000_init_dm9000(struct net_device *dev)
iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */
ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
/* if wol is needed, then always set NCR_WAKEEN otherwise we end
@ -1201,109 +1305,6 @@ dm9000_open(struct net_device *dev)
return 0;
}
/*
* Sleep, either by using msleep() or if we are suspending, then
* use mdelay() to sleep.
*/
static void dm9000_msleep(board_info_t *db, unsigned int ms)
{
if (db->in_suspend)
mdelay(ms);
else
msleep(ms);
}
/*
* Read a word from phyxcer
*/
static int
dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
{
board_info_t *db = netdev_priv(dev);
unsigned long flags;
unsigned int reg_save;
int ret;
mutex_lock(&db->addr_lock);
spin_lock_irqsave(&db->lock,flags);
/* Save previous register address */
reg_save = readb(db->io_addr);
/* Fill the phyxcer register into REG_0C */
iow(db, DM9000_EPAR, DM9000_PHY | reg);
iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock,flags);
dm9000_msleep(db, 1); /* Wait read complete */
spin_lock_irqsave(&db->lock,flags);
reg_save = readb(db->io_addr);
iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
/* The read data keeps on REG_0D & REG_0E */
ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
/* restore the previous address */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock,flags);
mutex_unlock(&db->addr_lock);
dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
return ret;
}
/*
* Write a word to phyxcer
*/
static void
dm9000_phy_write(struct net_device *dev,
int phyaddr_unused, int reg, int value)
{
board_info_t *db = netdev_priv(dev);
unsigned long flags;
unsigned long reg_save;
dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
mutex_lock(&db->addr_lock);
spin_lock_irqsave(&db->lock,flags);
/* Save previous register address */
reg_save = readb(db->io_addr);
/* Fill the phyxcer register into REG_0C */
iow(db, DM9000_EPAR, DM9000_PHY | reg);
/* Fill the written data into REG_0D & REG_0E */
iow(db, DM9000_EPDRL, value);
iow(db, DM9000_EPDRH, value >> 8);
iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
dm9000_msleep(db, 1); /* Wait write complete */
spin_lock_irqsave(&db->lock,flags);
reg_save = readb(db->io_addr);
iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
/* restore the previous address */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
mutex_unlock(&db->addr_lock);
}
static void
dm9000_shutdown(struct net_device *dev)
{
@ -1502,7 +1503,12 @@ dm9000_probe(struct platform_device *pdev)
db->flags |= DM9000_PLATF_SIMPLE_PHY;
#endif
dm9000_reset(db);
/* Fixing bug on dm9000_probe, takeover dm9000_reset(db),
* Need 'NCR_MAC_LBK' bit to indeed stable our DM9000 fifo
* while probe stage.
*/
iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
/* try multiple times, DM9000 sometimes gets the read wrong */
for (i = 0; i < 8; i++) {


@ -69,7 +69,9 @@
#define NCR_WAKEEN (1<<6)
#define NCR_FCOL (1<<4)
#define NCR_FDX (1<<3)
#define NCR_LBK (3<<1)
#define NCR_RESERVED (3<<1)
#define NCR_MAC_LBK (1<<1)
#define NCR_RST (1<<0)
#define NSR_SPEED (1<<7)
@ -167,5 +169,12 @@
#define ISR_LNKCHNG (1<<5)
#define ISR_UNDERRUN (1<<4)
/* Davicom MII registers.
*/
#define MII_DM_DSPCR 0x1b /* DSP Control Register */
#define DSPCR_INIT_PARAM 0xE100 /* DSP init parameter */
#endif /* _DM9000X_H_ */


@ -345,6 +345,53 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
/* Init RX & TX buffer descriptors
*/
static void fec_enet_bd_init(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
struct bufdesc *bdp;
unsigned int i;
/* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base;
for (i = 0; i < RX_RING_SIZE; i++) {
/* Initialize the BD for every fragment in the page. */
if (bdp->cbd_bufaddr)
bdp->cbd_sc = BD_ENET_RX_EMPTY;
else
bdp->cbd_sc = 0;
bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
fep->cur_rx = fep->rx_bd_base;
/* ...and the same for transmit */
bdp = fep->tx_bd_base;
fep->cur_tx = bdp;
for (i = 0; i < TX_RING_SIZE; i++) {
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
dev_kfree_skb_any(fep->tx_skbuff[i]);
fep->tx_skbuff[i] = NULL;
}
bdp->cbd_bufaddr = 0;
bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
fep->dirty_tx = bdp;
}
/* This function is called to start or restart the FEC during a link
* change. This only happens when switching between half and full
* duplex.
@ -388,6 +435,8 @@ fec_restart(struct net_device *ndev, int duplex)
/* Set maximum receive buffer size. */
writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
fec_enet_bd_init(ndev);
/* Set receive and transmit descriptor base. */
writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
if (fep->bufdesc_ex)
@ -397,7 +446,6 @@ fec_restart(struct net_device *ndev, int duplex)
writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
fep->cur_rx = fep->rx_bd_base;
for (i = 0; i <= TX_RING_MOD_MASK; i++) {
if (fep->tx_skbuff[i]) {
@ -1597,8 +1645,6 @@ static int fec_enet_init(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct bufdesc *cbd_base;
struct bufdesc *bdp;
unsigned int i;
/* Allocate memory for buffer descriptors. */
cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
@ -1608,6 +1654,7 @@ static int fec_enet_init(struct net_device *ndev)
return -ENOMEM;
}
memset(cbd_base, 0, PAGE_SIZE);
spin_lock_init(&fep->hw_lock);
fep->netdev = ndev;
@ -1631,35 +1678,6 @@ static int fec_enet_init(struct net_device *ndev)
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
/* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base;
for (i = 0; i < RX_RING_SIZE; i++) {
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
/* ...and the same for transmit */
bdp = fep->tx_bd_base;
fep->cur_tx = bdp;
for (i = 0; i < TX_RING_SIZE; i++) {
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
bdp->cbd_bufaddr = 0;
bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
fep->dirty_tx = bdp;
fec_restart(ndev, 0);
return 0;


@ -1053,6 +1053,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->buffer_info[i].dma =
dma_map_single(&pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) {
ret_val = 4;
goto err_nomem;
}
tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
tx_desc->lower.data = cpu_to_le32(skb->len);
tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
@ -1069,7 +1073,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer),
GFP_KERNEL);
if (!rxdr->buffer_info) {
ret_val = 4;
ret_val = 5;
goto err_nomem;
}
@ -1077,7 +1081,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
GFP_KERNEL);
if (!rxdr->desc) {
ret_val = 5;
ret_val = 6;
goto err_nomem;
}
memset(rxdr->desc, 0, rxdr->size);
@ -1101,7 +1105,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
if (!skb) {
ret_val = 6;
ret_val = 7;
goto err_nomem;
}
skb_reserve(skb, NET_IP_ALIGN);
@ -1110,6 +1114,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->buffer_info[i].dma =
dma_map_single(&pdev->dev, skb->data,
E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) {
ret_val = 8;
goto err_nomem;
}
rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
memset(skb->data, 0x00, skb->len);
}


@ -848,11 +848,16 @@ check_page:
}
}
if (!buffer_info->dma)
if (!buffer_info->dma) {
buffer_info->dma = dma_map_page(&pdev->dev,
buffer_info->page, 0,
PAGE_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
adapter->alloc_rx_buff_failed++;
break;
}
}
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);


@ -2159,6 +2159,10 @@ map_skb:
skb->data,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
adapter->alloc_rx_buff_failed++;
break;
}
rx_desc = IXGB_RX_DESC(*rx_ring, i);
rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
@ -2168,7 +2172,8 @@ map_skb:
rx_desc->status = 0;
if (++i == rx_ring->count) i = 0;
if (++i == rx_ring->count)
i = 0;
buffer_info = &rx_ring->buffer_info[i];
}


@ -1067,7 +1067,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
tp = space - 2048/8;
tp = space - 8192/8;
sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
} else {


@ -2074,7 +2074,7 @@ enum {
GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
#define GMAC_DEF_MSK GM_IS_TX_FF_UR
#define GMAC_DEF_MSK (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR)
};
/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */


@ -528,7 +528,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
for (; rxfc != 0; rxfc--) {
rxh = ks8851_rdreg32(ks, KS_RXFHSR);
rxstat = rxh & 0xffff;
rxlen = rxh >> 16;
rxlen = (rxh >> 16) & 0xfff;
netif_dbg(ks, rx_status, ks->netdev,
"rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);


@ -1216,10 +1216,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
if (felic_stat & ECSR_LCHNG) {
/* Link Changed */
if (mdp->cd->no_psr || mdp->no_ether_link) {
if (mdp->link == PHY_DOWN)
link_stat = 0;
else
link_stat = PHY_ST_LINK;
goto ignore_link;
} else {
link_stat = (sh_eth_read(ndev, PSR));
if (mdp->ether_link_active_low)
@ -1242,6 +1239,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
}
}
ignore_link:
if (intr_status & EESR_TWB) {
/* Write buck end. unused write back interrupt */
if (intr_status & EESR_TABT) /* Transmit Abort int */
@ -1326,12 +1324,18 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_cpu_data *cd = mdp->cd;
irqreturn_t ret = IRQ_NONE;
u32 intr_status = 0;
unsigned long intr_status;
spin_lock(&mdp->lock);
/* Get interrpt stat */
/* Get interrupt status */
intr_status = sh_eth_read(ndev, EESR);
/* Mask it with the interrupt mask, forcing ECI interrupt to be always
* enabled since it's the one that comes thru regardless of the mask,
* and we need to fully handle it in sh_eth_error() in order to quench
* it as it doesn't get cleared by just writing 1 to the ECI bit...
*/
intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI;
/* Clear interrupt */
if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
@ -1373,7 +1377,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
struct phy_device *phydev = mdp->phydev;
int new_state = 0;
if (phydev->link != PHY_DOWN) {
if (phydev->link) {
if (phydev->duplex != mdp->duplex) {
new_state = 1;
mdp->duplex = phydev->duplex;
@ -1387,17 +1391,21 @@ static void sh_eth_adjust_link(struct net_device *ndev)
if (mdp->cd->set_rate)
mdp->cd->set_rate(ndev);
}
if (mdp->link == PHY_DOWN) {
if (!mdp->link) {
sh_eth_write(ndev,
(sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
new_state = 1;
mdp->link = phydev->link;
if (mdp->cd->no_psr || mdp->no_ether_link)
sh_eth_rcv_snd_enable(ndev);
}
} else if (mdp->link) {
new_state = 1;
mdp->link = PHY_DOWN;
mdp->link = 0;
mdp->speed = 0;
mdp->duplex = -1;
if (mdp->cd->no_psr || mdp->no_ether_link)
sh_eth_rcv_snd_disable(ndev);
}
if (new_state && netif_msg_link(mdp))
@ -1414,7 +1422,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
mdp->mii_bus->id , mdp->phy_id);
mdp->link = PHY_DOWN;
mdp->link = 0;
mdp->speed = 0;
mdp->duplex = -1;


@ -723,7 +723,7 @@ struct sh_eth_private {
u32 phy_id; /* PHY ID */
struct mii_bus *mii_bus; /* MDIO bus control */
struct phy_device *phydev; /* PHY device control */
enum phy_state link;
int link;
phy_interface_t phy_interface;
int msg_enable;
int speed;


@ -436,7 +436,7 @@ void cpsw_tx_handler(void *token, int len, int status)
* queue is stopped then start the queue as we have free desc for tx
*/
if (unlikely(netif_queue_stopped(ndev)))
netif_start_queue(ndev);
netif_wake_queue(ndev);
cpts_tx_timestamp(priv->cpts, skb);
priv->stats.tx_packets++;
priv->stats.tx_bytes += len;


@ -1053,7 +1053,7 @@ static void emac_tx_handler(void *token, int len, int status)
* queue is stopped then start the queue as we have free desc for tx
*/
if (unlikely(netif_queue_stopped(ndev)))
netif_start_queue(ndev);
netif_wake_queue(ndev);
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += len;
dev_kfree_skb_any(skb);


@ -914,8 +914,12 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size)
static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
{
struct usbnet *dev = netdev_priv(netdev);
int ret;
int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu);
if (new_mtu > MAX_SINGLE_PACKET_SIZE)
return -EINVAL;
ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
if (ret < 0) {
netdev_warn(dev->net, "Failed to set mac rx frame length\n");
return ret;
@ -1324,7 +1328,7 @@ static int smsc75xx_reset(struct usbnet *dev)
netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf);
ret = smsc75xx_set_rx_max_frame_length(dev, 1514);
ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
if (ret < 0) {
netdev_warn(dev->net, "Failed to set max rx frame length\n");
return ret;
@ -2134,8 +2138,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT))
dev->net->stats.rx_frame_errors++;
} else {
/* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
if (unlikely(size > (ETH_FRAME_LEN + 12))) {
/* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */
if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) {
netif_dbg(dev, rx_err, dev->net,
"size err rx_cmd_a=0x%08x\n",
rx_cmd_a);


@ -170,7 +170,8 @@ void ath_rx_poll(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
ieee80211_queue_work(sc->hw, &sc->hw_check_work);
if (!test_bit(SC_OP_INVALID, &sc->sc_flags))
ieee80211_queue_work(sc->hw, &sc->hw_check_work);
}
/*


@ -1487,8 +1487,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
const struct b43_dma_ops *ops;
struct b43_dmaring *ring;
struct b43_dmadesc_meta *meta;
static const struct b43_txstatus fake; /* filled with 0 */
const struct b43_txstatus *txstat;
int slot, firstused;
bool frame_succeed;
int skip;
static u8 err_out1, err_out2;
ring = parse_cookie(dev, status->cookie, &slot);
if (unlikely(!ring))
@ -1501,13 +1505,36 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
firstused = ring->current_slot - ring->used_slots + 1;
if (firstused < 0)
firstused = ring->nr_slots + firstused;
skip = 0;
if (unlikely(slot != firstused)) {
/* This possibly is a firmware bug and will result in
* malfunction, memory leaks and/or stall of DMA functionality. */
b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
"Expected %d, but got %d\n",
ring->index, firstused, slot);
return;
* malfunction, memory leaks and/or stall of DMA functionality.
*/
if (slot == next_slot(ring, next_slot(ring, firstused))) {
/* If a single header/data pair was missed, skip over
* the first two slots in an attempt to recover.
*/
slot = firstused;
skip = 2;
if (!err_out1) {
/* Report the error once. */
b43dbg(dev->wl,
"Skip on DMA ring %d slot %d.\n",
ring->index, slot);
err_out1 = 1;
}
} else {
/* More than a single header/data pair were missed.
* Report this error once.
*/
if (!err_out2)
b43dbg(dev->wl,
"Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
ring->index, firstused, slot);
err_out2 = 1;
return;
}
}
ops = ring->ops;
@ -1522,11 +1549,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
slot, firstused, ring->index);
break;
}
if (meta->skb) {
struct b43_private_tx_info *priv_info =
b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
unmap_descbuffer(ring, meta->dmaaddr,
meta->skb->len, 1);
kfree(priv_info->bouncebuffer);
priv_info->bouncebuffer = NULL;
} else {
@ -1538,8 +1567,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
struct ieee80211_tx_info *info;
if (unlikely(!meta->skb)) {
/* This is a scatter-gather fragment of a frame, so
* the skb pointer must not be NULL. */
/* This is a scatter-gather fragment of a frame,
* so the skb pointer must not be NULL.
*/
b43dbg(dev->wl, "TX status unexpected NULL skb "
"at slot %d (first=%d) on ring %d\n",
slot, firstused, ring->index);
@ -1550,9 +1580,18 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
/*
* Call back to inform the ieee80211 subsystem about
* the status of the transmission.
* the status of the transmission. When skipping over
* a missed TX status report, use a status structure
* filled with zeros to indicate that the frame was not
* sent (frame_count 0) and not acknowledged
*/
frame_succeed = b43_fill_txstatus_report(dev, info, status);
if (unlikely(skip))
txstat = &fake;
else
txstat = status;
frame_succeed = b43_fill_txstatus_report(dev, info,
txstat);
#ifdef CONFIG_B43_DEBUG
if (frame_succeed)
ring->nr_succeed_tx_packets++;
@ -1580,12 +1619,14 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
/* Everything unmapped and free'd. So it's not used anymore. */
ring->used_slots--;
if (meta->is_last_fragment) {
if (meta->is_last_fragment && !skip) {
/* This is the last scatter-gather
* fragment of the frame. We are done. */
break;
}
slot = next_slot(ring, slot);
if (skip > 0)
--skip;
}
if (ring->stopped) {
B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);


@ -1564,7 +1564,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
u16 clip_off[2] = { 0xFFFF, 0xFFFF };
u8 vcm_final = 0;
s8 offset[4];
s32 offset[4];
s32 results[8][4] = { };
s32 results_min[4] = { };
s32 poll_results[4] = { };
@ -1615,7 +1615,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
}
for (i = 0; i < 4; i += 2) {
s32 curr;
s32 mind = 40;
s32 mind = 0x100000;
s32 minpoll = 249;
u8 minvcm = 0;
if (2 * core != i)
@ -1732,7 +1732,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
u8 regs_save_radio[2];
u16 regs_save_phy[2];
s8 offset[4];
s32 offset[4];
u8 core;
u8 rail;
@ -1799,7 +1799,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
}
for (i = 0; i < 4; i++) {
s32 mind = 40;
s32 mind = 0x100000;
u8 minvcm = 0;
s32 minpoll = 249;
s32 curr;


@ -1137,9 +1137,8 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
gain0_15 = ((biq1 & 0xf) << 12) |
((tia & 0xf) << 8) |
((lna2 & 0x3) << 6) |
((lna2 & 0x3) << 4) |
((lna1 & 0x3) << 2) |
((lna1 & 0x3) << 0);
((lna2 &
0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0);
mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0);
mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0);
@ -1157,8 +1156,6 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
}
mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0);
mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11);
mod_phy_reg(pi, 0x4e6, (0x3 << 3), lna1 << 3);
}
@ -1331,43 +1328,6 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples)
return (iq_est.i_pwr + iq_est.q_pwr) / nsamples;
}
static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain,
u16 tia_gain, u16 lna2_gain)
{
u32 i_thresh_l, q_thresh_l;
u32 i_thresh_h, q_thresh_h;
struct lcnphy_iq_est iq_est_h, iq_est_l;
wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain,
lna2_gain, 0);
wlc_lcnphy_rx_gain_override_enable(pi, true);
wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0);
udelay(500);
write_radio_reg(pi, RADIO_2064_REG112, 0);
if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l))
return false;
wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0);
udelay(500);
write_radio_reg(pi, RADIO_2064_REG112, 0);
if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h))
return false;
i_thresh_l = (iq_est_l.i_pwr << 1);
i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr;
q_thresh_l = (iq_est_l.q_pwr << 1);
q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr;
if ((iq_est_h.i_pwr > i_thresh_l) &&
(iq_est_h.i_pwr < i_thresh_h) &&
(iq_est_h.q_pwr > q_thresh_l) &&
(iq_est_h.q_pwr < q_thresh_h))
return true;
return false;
}
static bool
wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
const struct lcnphy_rx_iqcomp *iqcomp,
@ -1382,8 +1342,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old,
rfoverride3_old, rfoverride3val_old, rfoverride4_old,
rfoverride4val_old, afectrlovr_old, afectrlovrval_old;
int tia_gain, lna2_gain, biq1_gain;
bool set_gain;
int tia_gain;
u32 received_power, rx_pwr_threshold;
u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl;
u16 values_to_save[11];
s16 *ptr;
@ -1408,135 +1368,127 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
goto cal_done;
}
WARN_ON(module != 1);
tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
if (module == 1) {
for (i = 0; i < 11; i++)
values_to_save[i] =
read_radio_reg(pi, rxiq_cal_rf_reg[i]);
Core1TxControl_old = read_phy_reg(pi, 0x631);
tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
or_phy_reg(pi, 0x631, 0x0015);
for (i = 0; i < 11; i++)
values_to_save[i] =
read_radio_reg(pi, rxiq_cal_rf_reg[i]);
Core1TxControl_old = read_phy_reg(pi, 0x631);
RFOverride0_old = read_phy_reg(pi, 0x44c);
RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
rfoverride2_old = read_phy_reg(pi, 0x4b0);
rfoverride2val_old = read_phy_reg(pi, 0x4b1);
rfoverride3_old = read_phy_reg(pi, 0x4f9);
rfoverride3val_old = read_phy_reg(pi, 0x4fa);
rfoverride4_old = read_phy_reg(pi, 0x938);
rfoverride4val_old = read_phy_reg(pi, 0x939);
afectrlovr_old = read_phy_reg(pi, 0x43b);
afectrlovrval_old = read_phy_reg(pi, 0x43c);
old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
or_phy_reg(pi, 0x631, 0x0015);
tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
if (tx_gain_override_old) {
wlc_lcnphy_get_tx_gain(pi, &old_gains);
tx_gain_index_old = pi_lcn->lcnphy_current_index;
}
RFOverride0_old = read_phy_reg(pi, 0x44c);
RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
rfoverride2_old = read_phy_reg(pi, 0x4b0);
rfoverride2val_old = read_phy_reg(pi, 0x4b1);
rfoverride3_old = read_phy_reg(pi, 0x4f9);
rfoverride3val_old = read_phy_reg(pi, 0x4fa);
rfoverride4_old = read_phy_reg(pi, 0x938);
rfoverride4val_old = read_phy_reg(pi, 0x939);
afectrlovr_old = read_phy_reg(pi, 0x43b);
afectrlovrval_old = read_phy_reg(pi, 0x43c);
old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
write_radio_reg(pi, RADIO_2064_REG116, 0x06);
write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
write_radio_reg(pi, RADIO_2064_REG098, 0x03);
write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
write_radio_reg(pi, RADIO_2064_REG114, 0x01);
write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
write_phy_reg(pi, 0x6da, 0xffff);
or_phy_reg(pi, 0x6db, 0x3);
wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
set_gain = false;
lna2_gain = 3;
while ((lna2_gain >= 0) && !set_gain) {
tia_gain = 4;
while ((tia_gain >= 0) && !set_gain) {
biq1_gain = 6;
while ((biq1_gain >= 0) && !set_gain) {
set_gain = wlc_lcnphy_rx_iq_cal_gain(pi,
(u16)
biq1_gain,
(u16)
tia_gain,
(u16)
lna2_gain);
biq1_gain -= 1;
}
tia_gain -= 1;
tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
if (tx_gain_override_old) {
wlc_lcnphy_get_tx_gain(pi, &old_gains);
tx_gain_index_old = pi_lcn->lcnphy_current_index;
}
lna2_gain -= 1;
wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
write_radio_reg(pi, RADIO_2064_REG116, 0x06);
write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
write_radio_reg(pi, RADIO_2064_REG098, 0x03);
write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
write_radio_reg(pi, RADIO_2064_REG114, 0x01);
write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0);
write_phy_reg(pi, 0x6da, 0xffff);
or_phy_reg(pi, 0x6db, 0x3);
wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
wlc_lcnphy_rx_gain_override_enable(pi, true);
tia_gain = 8;
rx_pwr_threshold = 950;
while (tia_gain > 0) {
tia_gain -= 1;
wlc_lcnphy_set_rx_gain_by_distribution(pi,
0, 0, 2, 2,
(u16)
tia_gain, 1, 0);
udelay(500);
received_power =
wlc_lcnphy_measure_digital_power(pi, 2000);
if (received_power < rx_pwr_threshold)
break;
}
result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff);
wlc_lcnphy_stop_tx_tone(pi);
write_phy_reg(pi, 0x631, Core1TxControl_old);
write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
write_phy_reg(pi, 0x4b0, rfoverride2_old);
write_phy_reg(pi, 0x4b1, rfoverride2val_old);
write_phy_reg(pi, 0x4f9, rfoverride3_old);
write_phy_reg(pi, 0x4fa, rfoverride3val_old);
write_phy_reg(pi, 0x938, rfoverride4_old);
write_phy_reg(pi, 0x939, rfoverride4val_old);
write_phy_reg(pi, 0x43b, afectrlovr_old);
write_phy_reg(pi, 0x43c, afectrlovrval_old);
write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
wlc_lcnphy_clear_trsw_override(pi);
mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
for (i = 0; i < 11; i++)
write_radio_reg(pi, rxiq_cal_rf_reg[i],
values_to_save[i]);
if (tx_gain_override_old)
wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
else
wlc_lcnphy_disable_tx_gain_override(pi);
wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
wlc_lcnphy_rx_gain_override_enable(pi, false);
}
if (set_gain)
result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024);
else
result = false;
wlc_lcnphy_stop_tx_tone(pi);
write_phy_reg(pi, 0x631, Core1TxControl_old);
write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
write_phy_reg(pi, 0x4b0, rfoverride2_old);
write_phy_reg(pi, 0x4b1, rfoverride2val_old);
write_phy_reg(pi, 0x4f9, rfoverride3_old);
write_phy_reg(pi, 0x4fa, rfoverride3val_old);
write_phy_reg(pi, 0x938, rfoverride4_old);
write_phy_reg(pi, 0x939, rfoverride4val_old);
write_phy_reg(pi, 0x43b, afectrlovr_old);
write_phy_reg(pi, 0x43c, afectrlovrval_old);
write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
wlc_lcnphy_clear_trsw_override(pi);
mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
for (i = 0; i < 11; i++)
write_radio_reg(pi, rxiq_cal_rf_reg[i],
values_to_save[i]);
if (tx_gain_override_old)
wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
else
wlc_lcnphy_disable_tx_gain_override(pi);
wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
wlc_lcnphy_rx_gain_override_enable(pi, false);
cal_done:
kfree(ptr);
return result;
@ -1829,17 +1781,6 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
write_radio_reg(pi, RADIO_2064_REG038, 3);
write_radio_reg(pi, RADIO_2064_REG091, 7);
}
if (!(pi->sh->boardflags & BFL_FEM)) {
u8 reg038[14] = {0xd, 0xe, 0xd, 0xd, 0xd, 0xc,
0xa, 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0};
write_radio_reg(pi, RADIO_2064_REG02A, 0xf);
write_radio_reg(pi, RADIO_2064_REG091, 0x3);
write_radio_reg(pi, RADIO_2064_REG038, 0x3);
write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]);
}
}
static int
@ -2034,16 +1975,6 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos)
} else {
mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1);
mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0);
mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2);
mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0);
mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4);
mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77);
mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1);
mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7);
mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1);
mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4);
}
} else {
mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2);
@ -2130,14 +2061,12 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi)
(auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12));
mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5));
mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0));
}
static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
{
struct phytbl_info tab;
u32 rfseq, ind;
u8 tssi_sel;
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
tab.tbl_width = 32;
@ -2159,13 +2088,7 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4);
if (pi->sh->boardflags & BFL_FEM) {
tssi_sel = 0x1;
wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
} else {
tssi_sel = 0xe;
wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_POST_PA);
}
wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14);
mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15);
@ -2201,10 +2124,9 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0);
if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel);
mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe);
mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4);
} else {
mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1);
mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1);
mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3);
}
@ -2251,10 +2173,6 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8);
mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0);
mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
wlc_lcnphy_pwrctrl_rssiparams(pi);
}
@ -2873,8 +2791,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
read_radio_reg(pi, RADIO_2064_REG007) & 1;
u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi);
idleTssi = read_phy_reg(pi, 0x4ab);
suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
MCTL_EN_MAC));
@ -2892,12 +2808,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4);
mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2);
wlc_lcnphy_tssi_setup(pi);
mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0));
mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6));
wlc_lcnphy_set_bbmult(pi, 0x0);
wlc_phy_do_dummy_tx(pi, true, OFF);
idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))
>> 0);
@ -2919,7 +2829,6 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12);
wlc_lcnphy_set_bbmult(pi, SAVE_bbmult);
wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old);
wlc_lcnphy_set_tx_gain(pi, &old_gains);
wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl);
@ -3133,11 +3042,6 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
wlc_lcnphy_write_table(pi, &tab);
tab.tbl_offset++;
}
mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0);
mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0);
mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8);
mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4);
mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2);
mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7);
@ -3939,6 +3843,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
target_gains.pad_gain = 21;
target_gains.dac_gain = 0;
wlc_lcnphy_set_tx_gain(pi, &target_gains);
wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) {
@ -3949,7 +3854,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
lcnphy_recal ? LCNPHY_CAL_RECAL :
LCNPHY_CAL_FULL), false);
} else {
wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
wlc_lcnphy_tx_iqlo_soft_cal_full(pi);
}
@ -4374,22 +4278,17 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi,
if (CHSPEC_IS5G(pi->radio_chanspec))
pa_gain = 0x70;
else
pa_gain = 0x60;
pa_gain = 0x70;
if (pi->sh->boardflags & BFL_FEM)
pa_gain = 0x10;
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
tab.tbl_width = 32;
tab.tbl_len = 1;
tab.tbl_ptr = &val;
for (j = 0; j < 128; j++) {
if (pi->sh->boardflags & BFL_FEM)
gm_gain = gain_table[j].gm;
else
gm_gain = 15;
gm_gain = gain_table[j].gm;
val = (((u32) pa_gain << 24) |
(gain_table[j].pad << 16) |
(gain_table[j].pga << 8) | gm_gain);
@ -4600,10 +4499,7 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
write_phy_reg(pi, 0x4ea, 0x4688);
if (pi->sh->boardflags & BFL_FEM)
mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
else
mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0);
mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6);
@ -4614,13 +4510,6 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
wlc_lcnphy_rcal(pi);
wlc_lcnphy_rc_cal(pi);
if (!(pi->sh->boardflags & BFL_FEM)) {
write_radio_reg(pi, RADIO_2064_REG032, 0x6f);
write_radio_reg(pi, RADIO_2064_REG033, 0x19);
write_radio_reg(pi, RADIO_2064_REG039, 0xe);
}
}
static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
@ -4650,20 +4539,22 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
wlc_lcnphy_write_table(pi, &tab);
}
if (!(pi->sh->boardflags & BFL_FEM)) {
tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
tab.tbl_width = 16;
tab.tbl_ptr = &val;
tab.tbl_len = 1;
tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
tab.tbl_width = 16;
tab.tbl_ptr = &val;
tab.tbl_len = 1;
val = 150;
tab.tbl_offset = 0;
wlc_lcnphy_write_table(pi, &tab);
val = 114;
tab.tbl_offset = 0;
wlc_lcnphy_write_table(pi, &tab);
val = 220;
tab.tbl_offset = 1;
wlc_lcnphy_write_table(pi, &tab);
}
val = 130;
tab.tbl_offset = 1;
wlc_lcnphy_write_table(pi, &tab);
val = 6;
tab.tbl_offset = 8;
wlc_lcnphy_write_table(pi, &tab);
if (CHSPEC_IS2G(pi->radio_chanspec)) {
if (pi->sh->boardflags & BFL_FEM)
@ -5055,7 +4946,6 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec)
wlc_lcnphy_load_tx_iir_filter(pi, true, 3);
mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3);
wlc_lcnphy_tssi_setup(pi);
}
void wlc_phy_detach_lcnphy(struct brcms_phy *pi)
@ -5094,7 +4984,8 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
return false;
if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
if ((pi->sh->boardflags & BFL_FEM) &&
(LCNREV_IS(pi->pubpi.phy_rev, 1))) {
if (pi_lcn->lcnphy_tempsense_option == 3) {
pi->hwpwrctrl = true;
pi->hwpwrctrl_capable = true;


@ -1992,70 +1992,70 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0[] = {
};
static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = {
0x0009,
0x000a,
0x0005,
0x0006,
0x0009,
0x000a,
0x0005,
0x0006,
0x0005,
0x000a,
0x0009,
0x000a,
0x0005,
0x0006,
0x0005,
0x000a,
0x0009,
0x000a,
0x0005,
0x0006,
0x0005,
0x000a,
0x0009,
0x000a,
0x0005,
0x0006,
0x0005,
0x000a,
0x0009,
0x000a,
0x0005,
0x0006,
0x0005,
0x000a,
0x0009,
0x000a,
0x0005,
0x0006,
0x0005,
0x000a,
0x0009,
0x000a,
0x0005,
0x0006,
0x0005,
0x000a,
0x0009,
0x000a,
0x0005,
0x0006,
0x0005,
0x000a,
0x0009,
0x000a,
0x0005,
0x0006,
0x0005,
0x000a,
0x0009,
0x000a,
0x0005,
0x0006,
0x0005,
0x000a,
0x0009,
0x000a,
0x0005,
0x0006,
0x0005,
0x000a,
0x0009,
0x000a,
0x0005,
0x0006,
0x0005,
0x000a,
0x0009,
0x000a,
0x0005,
0x0006,
0x0005,
0x000a,
0x0009,
0x000a,
0x0005,
0x0006,
0x0005,
0x000a,
0x0009,
0x000a,
0x0005,
0x0006,
0x0005,
0x000a,
0x0009,
0x0006,
0x0005,
};
static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = {


@ -2152,7 +2152,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
int rate_idx;
int i;
u32 rate;
u8 use_green = il4965_rs_use_green(il, sta);
u8 use_green;
u8 active_tbl = 0;
u8 valid_tx_ant;
struct il_station_priv *sta_priv;
@ -2160,6 +2160,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
if (!sta || !lq_sta)
return;
use_green = il4965_rs_use_green(il, sta);
sta_priv = (void *)sta->drv_priv;
i = lq_sta->last_txrate_idx;


@ -1261,6 +1261,15 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
return -EIO;
}
/*
* This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag
* in iwl_down but cancel the workers only later.
*/
if (!priv->ucode_loaded) {
IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id);
return -EIO;
}
/*
* Synchronous commands from this op-mode must hold
* the mutex, this ensures we don't try to send two


@ -367,6 +367,8 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
return -EIO;
}
priv->ucode_loaded = true;
if (ucode_type != IWL_UCODE_WOWLAN) {
/* delay a bit to give rfkill time to run */
msleep(5);
@ -380,8 +382,6 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
return ret;
}
priv->ucode_loaded = true;
return 0;
}


@ -475,6 +475,10 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
/* If platform's RF_KILL switch is NOT set to KILL */
hw_rfkill = iwl_is_rfkill_set(trans);
if (hw_rfkill)
set_bit(STATUS_RFKILL, &trans_pcie->status);
else
clear_bit(STATUS_RFKILL, &trans_pcie->status);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
if (hw_rfkill && !run_in_rfkill)
return -ERFKILL;
@ -641,6 +645,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill;
int err;
@ -656,6 +661,10 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
iwl_enable_rfkill_int(trans);
hw_rfkill = iwl_is_rfkill_set(trans);
if (hw_rfkill)
set_bit(STATUS_RFKILL, &trans_pcie->status);
else
clear_bit(STATUS_RFKILL, &trans_pcie->status);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
return 0;
@ -694,6 +703,10 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
* op_mode.
*/
hw_rfkill = iwl_is_rfkill_set(trans);
if (hw_rfkill)
set_bit(STATUS_RFKILL, &trans_pcie->status);
else
clear_bit(STATUS_RFKILL, &trans_pcie->status);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
}
}


@ -1264,7 +1264,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
int copy = 0;
if (!cmd->len)
if (!cmd->len[i])
continue;
/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */


@ -1508,6 +1508,7 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
}
memcpy(adapter->upld_buf, skb->data,
min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
skb_push(skb, INTF_HEADER_LEN);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
PCI_DMA_FROMDEVICE))
return -1;


@ -895,7 +895,7 @@ struct netdev_fcoe_hbainfo {
*
* int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh)
* int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
* struct net_device *dev)
* struct net_device *dev, u32 filter_mask)
*
* int (*ndo_change_carrier)(struct net_device *dev, bool new_carrier);
* Called to change device carrier. Soft-devices (like dummy, team, etc)


@ -1624,7 +1624,6 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
}
skb_orphan(skb);
nf_reset(skb);
if (unlikely(!is_skb_forwardable(dev, skb))) {
atomic_long_inc(&dev->rx_dropped);
@ -3314,6 +3313,7 @@ int netdev_rx_handler_register(struct net_device *dev,
if (dev->rx_handler)
return -EBUSY;
/* Note: rx_handler_data must be set before rx_handler */
rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
rcu_assign_pointer(dev->rx_handler, rx_handler);
@ -3334,6 +3334,11 @@ void netdev_rx_handler_unregister(struct net_device *dev)
ASSERT_RTNL();
RCU_INIT_POINTER(dev->rx_handler, NULL);
/* a reader seeing a non NULL rx_handler in a rcu_read_lock()
* section has a guarantee to see a non NULL rx_handler_data
* as well.
*/
synchronize_net();
RCU_INIT_POINTER(dev->rx_handler_data, NULL);
}
EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);


@ -328,7 +328,7 @@ static void flow_cache_flush_per_cpu(void *data)
struct flow_flush_info *info = data;
struct tasklet_struct *tasklet;
tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet);
tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
tasklet->data = (unsigned long)info;
tasklet_schedule(tasklet);
}


@ -496,8 +496,10 @@ static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
}
if (ops->fill_info) {
data = nla_nest_start(skb, IFLA_INFO_DATA);
if (data == NULL)
if (data == NULL) {
err = -EMSGSIZE;
goto err_cancel_link;
}
err = ops->fill_info(skb, dev);
if (err < 0)
goto err_cancel_data;


@ -118,6 +118,18 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
ipv6_addr_loopback(&hdr->daddr))
goto err;
/* RFC4291 Errata ID: 3480
* Interface-Local scope spans only a single interface on a
* node and is useful only for loopback transmission of
* multicast. Packets with interface-local scope received
* from another node must be discarded.
*/
if (!(skb->pkt_type == PACKET_LOOPBACK ||
dev->flags & IFF_LOOPBACK) &&
ipv6_addr_is_multicast(&hdr->daddr) &&
IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1)
goto err;
/* RFC4291 2.7
* Nodes must not originate a packet to a multicast address whose scope
* field contains the reserved value 0; if such a packet is received, it
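
The check added above (the Errata ID 3480 hunk) boils down to this: an IPv6 multicast address begins with 0xff and carries its scope in the low nibble of the second byte, and scope 1 (interface-local) is only meaningful for loopback delivery, which is why ipv6_rcv() exempts PACKET_LOOPBACK packets and loopback devices before dropping. A small stand-alone sketch of the classification; the helper names and sample addresses are illustrative only, not kernel APIs:

#include <stdio.h>
#include <stdint.h>

static int is_multicast(const uint8_t a[16]) { return a[0] == 0xff; }
static int mc_scope(const uint8_t a[16])     { return a[1] & 0x0f; }

int main(void)
{
	/* ff01::1 (interface-local all-nodes) and ff02::1 (link-local all-nodes) */
	const uint8_t iflocal[16]   = { 0xff, 0x01, [15] = 0x01 };
	const uint8_t linklocal[16] = { 0xff, 0x02, [15] = 0x01 };
	const uint8_t *pkt[2] = { iflocal, linklocal };

	for (int i = 0; i < 2; i++) {
		/* packets arriving from the wire (not looped back) with scope 1 are dropped */
		int drop = is_multicast(pkt[i]) && mc_scope(pkt[i]) == 1;
		printf("dst %d: %s\n", i, drop ? "drop (interface-local)" : "accept");
	}
	return 0;
}

Only the ff01::1 destination is rejected; link-local and wider scopes pass through as before.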


@ -2693,6 +2693,7 @@ static int key_notify_policy_flush(const struct km_event *c)
hdr->sadb_msg_pid = c->portid;
hdr->sadb_msg_version = PF_KEY_V2;
hdr->sadb_msg_errno = (uint8_t) 0;
hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
return 0;


@ -349,21 +349,19 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)
static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
int ret = 0;
int ret;
if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
return 0;
mutex_lock(&local->iflist_mtx);
ASSERT_RTNL();
if (local->monitor_sdata)
goto out_unlock;
return 0;
sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL);
if (!sdata) {
ret = -ENOMEM;
goto out_unlock;
}
if (!sdata)
return -ENOMEM;
/* set up data */
sdata->local = local;
@ -377,13 +375,13 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
if (WARN_ON(ret)) {
/* ok .. stupid driver, it asked for this! */
kfree(sdata);
goto out_unlock;
return ret;
}
ret = ieee80211_check_queues(sdata);
if (ret) {
kfree(sdata);
goto out_unlock;
return ret;
}
ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef,
@ -391,13 +389,14 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
if (ret) {
drv_remove_interface(local, sdata);
kfree(sdata);
goto out_unlock;
return ret;
}
mutex_lock(&local->iflist_mtx);
rcu_assign_pointer(local->monitor_sdata, sdata);
out_unlock:
mutex_unlock(&local->iflist_mtx);
return ret;
return 0;
}
static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
@ -407,14 +406,20 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))
return;
ASSERT_RTNL();
mutex_lock(&local->iflist_mtx);
sdata = rcu_dereference_protected(local->monitor_sdata,
lockdep_is_held(&local->iflist_mtx));
if (!sdata)
goto out_unlock;
if (!sdata) {
mutex_unlock(&local->iflist_mtx);
return;
}
rcu_assign_pointer(local->monitor_sdata, NULL);
mutex_unlock(&local->iflist_mtx);
synchronize_net();
ieee80211_vif_release_channel(sdata);
@ -422,8 +427,6 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)
drv_remove_interface(local, sdata);
kfree(sdata);
out_unlock:
mutex_unlock(&local->iflist_mtx);
}
/*


@ -1060,7 +1060,8 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list)
if (ieee80211_vif_is_mesh(&sdata->vif))
if (ieee80211_vif_is_mesh(&sdata->vif) &&
ieee80211_sdata_running(sdata))
ieee80211_queue_work(&local->hw, &sdata->work);
rcu_read_unlock();
}


@ -3608,8 +3608,10 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)
/* Restart STA timers */
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list)
ieee80211_restart_sta_timer(sdata);
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
if (ieee80211_sdata_running(sdata))
ieee80211_restart_sta_timer(sdata);
}
rcu_read_unlock();
}


@ -2675,7 +2675,19 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
memset(nskb->cb, 0, sizeof(nskb->cb));
ieee80211_tx_skb(rx->sdata, nskb);
if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
IEEE80211_TX_CTL_NO_CCK_RATE;
if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
info->hw_queue =
local->hw.offchannel_tx_hw_queue;
}
__ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
status->band);
}
dev_kfree_skb(rx->skb);
return RX_QUEUED;


@ -766,6 +766,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
struct ieee80211_local *local;
struct ieee80211_sub_if_data *sdata;
int ret, i;
bool have_key = false;
might_sleep();
@ -793,12 +794,19 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
list_del_rcu(&sta->list);
mutex_lock(&local->key_mtx);
for (i = 0; i < NUM_DEFAULT_KEYS; i++)
for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
__ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i]));
if (sta->ptk)
have_key = true;
}
if (sta->ptk) {
__ieee80211_key_free(key_mtx_dereference(local, sta->ptk));
have_key = true;
}
mutex_unlock(&local->key_mtx);
if (!have_key)
synchronize_net();
sta->dead = true;
local->num_sta--;


@ -195,7 +195,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
flow->deficit = q->quantum;
flow->dropped = 0;
}
if (++sch->q.qlen < sch->limit)
if (++sch->q.qlen <= sch->limit)
return NET_XMIT_SUCCESS;
q->drop_overlimit++;
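
The changed comparison is the whole fix: sch->q.qlen is pre-incremented, so with '<' a qdisc configured for 'limit' packets starts dropping one packet early and never actually reaches its limit. A toy stand-alone model of both checks, with a made-up limit:

#include <stdio.h>

int main(void)
{
	unsigned int limit = 3;

	for (unsigned int qlen = 0; qlen < 4; qlen++) {
		unsigned int after = qlen + 1;	/* models ++sch->q.qlen */
		printf("enqueue at qlen %u: old '<' -> %s, fixed '<=' -> %s\n",
		       qlen,
		       after < limit ? "accept" : "drop",
		       after <= limit ? "accept" : "drop");
	}
	return 0;
}

With limit 3 the old test already drops the third packet; the fixed test accepts it and only drops the fourth.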


@ -904,7 +904,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate)
u64 mult;
int shift;
r->rate_bps = rate << 3;
r->rate_bps = (u64)rate << 3;
r->shift = 0;
r->mult = 1;
/*
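
The (u64) cast above is the whole fix: 'rate' is a 32-bit bytes-per-second value, and without the cast the '<< 3' conversion to bits per second is evaluated in 32-bit arithmetic, which silently wraps once the rate exceeds about 4.29 Gbit/s. A minimal user-space illustration, not kernel code, with a made-up rate constant:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t rate = 625000000u;		/* 5 Gbit/s expressed in bytes/s */

	uint64_t wrong = rate << 3;		/* shift performed in 32 bits, wraps */
	uint64_t right = (uint64_t)rate << 3;	/* promoted first, as in the fix */

	printf("without cast: %llu\n", (unsigned long long)wrong);
	printf("with cast:    %llu\n", (unsigned long long)right);
	return 0;
}

The first line prints 705032704 instead of the intended 5000000000, which is the kind of bogus rate the precompute step was feeding into the scheduler.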


@ -212,6 +212,39 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data)
rdev_rfkill_poll(rdev);
}
void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev)
{
lockdep_assert_held(&rdev->devlist_mtx);
lockdep_assert_held(&rdev->sched_scan_mtx);
if (WARN_ON(wdev->iftype != NL80211_IFTYPE_P2P_DEVICE))
return;
if (!wdev->p2p_started)
return;
rdev_stop_p2p_device(rdev, wdev);
wdev->p2p_started = false;
rdev->opencount--;
if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
bool busy = work_busy(&rdev->scan_done_wk);
/*
* If the work isn't pending or running (in which case it would
* be waiting for the lock we hold) the driver didn't properly
* cancel the scan when the interface was removed. In this case
* warn and leak the scan request object to not crash later.
*/
WARN_ON(!busy);
rdev->scan_req->aborted = true;
___cfg80211_scan_done(rdev, !busy);
}
}
static int cfg80211_rfkill_set_block(void *data, bool blocked)
{
struct cfg80211_registered_device *rdev = data;
@ -221,7 +254,8 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
return 0;
rtnl_lock();
mutex_lock(&rdev->devlist_mtx);
/* read-only iteration need not hold the devlist_mtx */
list_for_each_entry(wdev, &rdev->wdev_list, list) {
if (wdev->netdev) {
@ -231,18 +265,18 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)
/* otherwise, check iftype */
switch (wdev->iftype) {
case NL80211_IFTYPE_P2P_DEVICE:
if (!wdev->p2p_started)
break;
rdev_stop_p2p_device(rdev, wdev);
wdev->p2p_started = false;
rdev->opencount--;
/* but this requires it */
mutex_lock(&rdev->devlist_mtx);
mutex_lock(&rdev->sched_scan_mtx);
cfg80211_stop_p2p_device(rdev, wdev);
mutex_unlock(&rdev->sched_scan_mtx);
mutex_unlock(&rdev->devlist_mtx);
break;
default:
break;
}
}
mutex_unlock(&rdev->devlist_mtx);
rtnl_unlock();
return 0;
@ -745,17 +779,13 @@ static void wdev_cleanup_work(struct work_struct *work)
wdev = container_of(work, struct wireless_dev, cleanup_work);
rdev = wiphy_to_dev(wdev->wiphy);
cfg80211_lock_rdev(rdev);
mutex_lock(&rdev->sched_scan_mtx);
if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
rdev->scan_req->aborted = true;
___cfg80211_scan_done(rdev, true);
}
cfg80211_unlock_rdev(rdev);
mutex_lock(&rdev->sched_scan_mtx);
if (WARN_ON(rdev->sched_scan_req &&
rdev->sched_scan_req->dev == wdev->netdev)) {
__cfg80211_stop_sched_scan(rdev, false);
@ -781,21 +811,19 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev)
return;
mutex_lock(&rdev->devlist_mtx);
mutex_lock(&rdev->sched_scan_mtx);
list_del_rcu(&wdev->list);
rdev->devlist_generation++;
switch (wdev->iftype) {
case NL80211_IFTYPE_P2P_DEVICE:
if (!wdev->p2p_started)
break;
rdev_stop_p2p_device(rdev, wdev);
wdev->p2p_started = false;
rdev->opencount--;
cfg80211_stop_p2p_device(rdev, wdev);
break;
default:
WARN_ON_ONCE(1);
break;
}
mutex_unlock(&rdev->sched_scan_mtx);
mutex_unlock(&rdev->devlist_mtx);
}
EXPORT_SYMBOL(cfg80211_unregister_wdev);
@ -936,6 +964,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
cfg80211_update_iface_num(rdev, wdev->iftype, 1);
cfg80211_lock_rdev(rdev);
mutex_lock(&rdev->devlist_mtx);
mutex_lock(&rdev->sched_scan_mtx);
wdev_lock(wdev);
switch (wdev->iftype) {
#ifdef CONFIG_CFG80211_WEXT
@ -967,6 +996,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
break;
}
wdev_unlock(wdev);
mutex_unlock(&rdev->sched_scan_mtx);
rdev->opencount++;
mutex_unlock(&rdev->devlist_mtx);
cfg80211_unlock_rdev(rdev);


@ -503,6 +503,9 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,
void cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,
enum nl80211_iftype iftype, int num);
void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev);
#define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10
#ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS


@ -4702,14 +4702,19 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->scan)
return -EOPNOTSUPP;
if (rdev->scan_req)
return -EBUSY;
mutex_lock(&rdev->sched_scan_mtx);
if (rdev->scan_req) {
err = -EBUSY;
goto unlock;
}
if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
n_channels = validate_scan_freqs(
info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
if (!n_channels)
return -EINVAL;
if (!n_channels) {
err = -EINVAL;
goto unlock;
}
} else {
enum ieee80211_band band;
n_channels = 0;
@ -4723,23 +4728,29 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp)
n_ssids++;
if (n_ssids > wiphy->max_scan_ssids)
return -EINVAL;
if (n_ssids > wiphy->max_scan_ssids) {
err = -EINVAL;
goto unlock;
}
if (info->attrs[NL80211_ATTR_IE])
ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
else
ie_len = 0;
if (ie_len > wiphy->max_scan_ie_len)
return -EINVAL;
if (ie_len > wiphy->max_scan_ie_len) {
err = -EINVAL;
goto unlock;
}
request = kzalloc(sizeof(*request)
+ sizeof(*request->ssids) * n_ssids
+ sizeof(*request->channels) * n_channels
+ ie_len, GFP_KERNEL);
if (!request)
return -ENOMEM;
if (!request) {
err = -ENOMEM;
goto unlock;
}
if (n_ssids)
request->ssids = (void *)&request->channels[n_channels];
@ -4876,6 +4887,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
kfree(request);
}
unlock:
mutex_unlock(&rdev->sched_scan_mtx);
return err;
}
@ -7749,20 +7762,9 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->stop_p2p_device)
return -EOPNOTSUPP;
if (!wdev->p2p_started)
return 0;
rdev_stop_p2p_device(rdev, wdev);
wdev->p2p_started = false;
mutex_lock(&rdev->devlist_mtx);
rdev->opencount--;
mutex_unlock(&rdev->devlist_mtx);
if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {
rdev->scan_req->aborted = true;
___cfg80211_scan_done(rdev, true);
}
mutex_lock(&rdev->sched_scan_mtx);
cfg80211_stop_p2p_device(rdev, wdev);
mutex_unlock(&rdev->sched_scan_mtx);
return 0;
}
@ -8486,7 +8488,7 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
struct nlattr *nest;
int i;
ASSERT_RDEV_LOCK(rdev);
lockdep_assert_held(&rdev->sched_scan_mtx);
if (WARN_ON(!req))
return 0;


@ -169,7 +169,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)
union iwreq_data wrqu;
#endif
ASSERT_RDEV_LOCK(rdev);
lockdep_assert_held(&rdev->sched_scan_mtx);
request = rdev->scan_req;
@ -230,9 +230,9 @@ void __cfg80211_scan_done(struct work_struct *wk)
rdev = container_of(wk, struct cfg80211_registered_device,
scan_done_wk);
cfg80211_lock_rdev(rdev);
mutex_lock(&rdev->sched_scan_mtx);
___cfg80211_scan_done(rdev, false);
cfg80211_unlock_rdev(rdev);
mutex_unlock(&rdev->sched_scan_mtx);
}
void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted)
@ -698,11 +698,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR);
if (found) {
found->pub.beacon_interval = tmp->pub.beacon_interval;
found->pub.signal = tmp->pub.signal;
found->pub.capability = tmp->pub.capability;
found->ts = tmp->ts;
/* Update IEs */
if (rcu_access_pointer(tmp->pub.proberesp_ies)) {
const struct cfg80211_bss_ies *old;
@ -723,6 +718,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
if (found->pub.hidden_beacon_bss &&
!list_empty(&found->hidden_list)) {
const struct cfg80211_bss_ies *f;
/*
* The found BSS struct is one of the probe
* response members of a group, but we're
@ -732,6 +729,10 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
* SSID to showing it, which is confusing so
* drop this information.
*/
f = rcu_access_pointer(tmp->pub.beacon_ies);
kfree_rcu((struct cfg80211_bss_ies *)f,
rcu_head);
goto drop;
}
@ -761,6 +762,11 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
kfree_rcu((struct cfg80211_bss_ies *)old,
rcu_head);
}
found->pub.beacon_interval = tmp->pub.beacon_interval;
found->pub.signal = tmp->pub.signal;
found->pub.capability = tmp->pub.capability;
found->ts = tmp->ts;
} else {
struct cfg80211_internal_bss *new;
struct cfg80211_internal_bss *hidden;
@ -1056,6 +1062,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
if (IS_ERR(rdev))
return PTR_ERR(rdev);
mutex_lock(&rdev->sched_scan_mtx);
if (rdev->scan_req) {
err = -EBUSY;
goto out;
@ -1162,6 +1169,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
dev_hold(dev);
}
out:
mutex_unlock(&rdev->sched_scan_mtx);
kfree(creq);
cfg80211_unlock_rdev(rdev);
return err;


@ -85,6 +85,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
ASSERT_RTNL();
ASSERT_RDEV_LOCK(rdev);
ASSERT_WDEV_LOCK(wdev);
lockdep_assert_held(&rdev->sched_scan_mtx);
if (rdev->scan_req)
return -EBUSY;
@ -320,11 +321,9 @@ void cfg80211_sme_scan_done(struct net_device *dev)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
mutex_lock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx);
wdev_lock(wdev);
__cfg80211_sme_scan_done(dev);
wdev_unlock(wdev);
mutex_unlock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx);
}
void cfg80211_sme_rx_auth(struct net_device *dev,
@ -924,9 +923,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,
int err;
mutex_lock(&rdev->devlist_mtx);
/* might request scan - scan_mtx -> wdev_mtx dependency */
mutex_lock(&rdev->sched_scan_mtx);
wdev_lock(dev->ieee80211_ptr);
err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL);
wdev_unlock(dev->ieee80211_ptr);
mutex_unlock(&rdev->sched_scan_mtx);
mutex_unlock(&rdev->devlist_mtx);
return err;


@ -27,7 +27,8 @@
#define WIPHY_PR_ARG __entry->wiphy_name
#define WDEV_ENTRY __field(u32, id)
#define WDEV_ASSIGN (__entry->id) = (wdev ? wdev->identifier : 0)
#define WDEV_ASSIGN (__entry->id) = (!IS_ERR_OR_NULL(wdev) \
? wdev->identifier : 0)
#define WDEV_PR_FMT "wdev(%u)"
#define WDEV_PR_ARG (__entry->id)
@ -1778,7 +1779,7 @@ TRACE_EVENT(rdev_set_mac_acl,
),
TP_fast_assign(
WIPHY_ASSIGN;
WIPHY_ASSIGN;
NETDEV_ASSIGN;
__entry->acl_policy = params->acl_policy;
),
TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d",
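
The WDEV_ASSIGN change at the top of this file's hunks is what keeps the cfg80211 tracepoints from dereferencing an error pointer when add_virtual_intf() fails. A stand-alone sketch of the idea, re-creating the ERR_PTR convention from include/linux/err.h in user space; the struct and error value are made up for illustration:

#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
static int IS_ERR_OR_NULL(const void *ptr) { return !ptr || IS_ERR(ptr); }

struct wdev { unsigned int identifier; };

int main(void)
{
	struct wdev good = { .identifier = 7 };
	struct wdev *ok  = &good;
	struct wdev *bad = ERR_PTR(-12);	/* e.g. a failed add_virtual_intf() */

	/* old macro body: wdev ? wdev->identifier : 0  -- would dereference 'bad' */
	/* guarded form, as in the patch: */
	printf("id = %u\n", !IS_ERR_OR_NULL(ok)  ? ok->identifier  : 0);
	printf("id = %u\n", !IS_ERR_OR_NULL(bad) ? bad->identifier : 0);
	return 0;
}

The guarded form prints 0 for the error pointer instead of following a near-top-of-address-space pointer inside the tracepoint.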


@ -89,6 +89,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
cfg80211_lock_rdev(rdev);
mutex_lock(&rdev->devlist_mtx);
mutex_lock(&rdev->sched_scan_mtx);
wdev_lock(wdev);
if (wdev->sme_state != CFG80211_SME_IDLE) {
@ -135,6 +136,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
err = cfg80211_mgd_wext_connect(rdev, wdev);
out:
wdev_unlock(wdev);
mutex_unlock(&rdev->sched_scan_mtx);
mutex_unlock(&rdev->devlist_mtx);
cfg80211_unlock_rdev(rdev);
return err;
@ -190,6 +192,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
cfg80211_lock_rdev(rdev);
mutex_lock(&rdev->devlist_mtx);
mutex_lock(&rdev->sched_scan_mtx);
wdev_lock(wdev);
err = 0;
@ -223,6 +226,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,
err = cfg80211_mgd_wext_connect(rdev, wdev);
out:
wdev_unlock(wdev);
mutex_unlock(&rdev->sched_scan_mtx);
mutex_unlock(&rdev->devlist_mtx);
cfg80211_unlock_rdev(rdev);
return err;
@ -285,6 +289,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
cfg80211_lock_rdev(rdev);
mutex_lock(&rdev->devlist_mtx);
mutex_lock(&rdev->sched_scan_mtx);
wdev_lock(wdev);
if (wdev->sme_state != CFG80211_SME_IDLE) {
@ -313,6 +318,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,
err = cfg80211_mgd_wext_connect(rdev, wdev);
out:
wdev_unlock(wdev);
mutex_unlock(&rdev->sched_scan_mtx);
mutex_unlock(&rdev->devlist_mtx);
cfg80211_unlock_rdev(rdev);
return err;


@ -334,6 +334,70 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)
x->xflags &= ~XFRM_TIME_DEFER;
}
static void xfrm_replay_notify_esn(struct xfrm_state *x, int event)
{
u32 seq_diff, oseq_diff;
struct km_event c;
struct xfrm_replay_state_esn *replay_esn = x->replay_esn;
struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn;
/* we send notify messages in case
* 1. we updated one of the sequence numbers, and the seqno difference
* is at least x->replay_maxdiff, in this case we also update the
* timeout of our timer function
* 2. if x->replay_maxage has elapsed since last update,
* and there were changes
*
* The state structure must be locked!
*/
switch (event) {
case XFRM_REPLAY_UPDATE:
if (!x->replay_maxdiff)
break;
if (replay_esn->seq_hi == preplay_esn->seq_hi)
seq_diff = replay_esn->seq - preplay_esn->seq;
else
seq_diff = ~preplay_esn->seq + replay_esn->seq + 1;
if (replay_esn->oseq_hi == preplay_esn->oseq_hi)
oseq_diff = replay_esn->oseq - preplay_esn->oseq;
else
oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1;
if (seq_diff < x->replay_maxdiff &&
oseq_diff < x->replay_maxdiff) {
if (x->xflags & XFRM_TIME_DEFER)
event = XFRM_REPLAY_TIMEOUT;
else
return;
}
break;
case XFRM_REPLAY_TIMEOUT:
if (memcmp(x->replay_esn, x->preplay_esn,
xfrm_replay_state_esn_len(replay_esn)) == 0) {
x->xflags |= XFRM_TIME_DEFER;
return;
}
break;
}
memcpy(x->preplay_esn, x->replay_esn,
xfrm_replay_state_esn_len(replay_esn));
c.event = XFRM_MSG_NEWAE;
c.data.aevent = event;
km_state_notify(x, &c);
if (x->replay_maxage &&
!mod_timer(&x->rtimer, jiffies + x->replay_maxage))
x->xflags &= ~XFRM_TIME_DEFER;
}
static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb)
{
int err = 0;
@ -510,7 +574,7 @@ static struct xfrm_replay xfrm_replay_esn = {
.advance = xfrm_replay_advance_esn,
.check = xfrm_replay_check_esn,
.recheck = xfrm_replay_recheck_esn,
.notify = xfrm_replay_notify_bmp,
.notify = xfrm_replay_notify_esn,
.overflow = xfrm_replay_overflow_esn,
};
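
The seq_diff/oseq_diff computation in xfrm_replay_notify_esn() above is what makes the notification threshold wrap-safe: when seq_hi has advanced, ~old + new + 1 is two's-complement subtraction, i.e. (new - old) modulo 2^32, so the difference stays small across a wrap of the low 32 bits. A quick stand-alone check with made-up sequence numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t old_seq = 0xfffffff0u;	/* preplay_esn->seq, just before the wrap */
	uint32_t new_seq = 0x00000010u;	/* replay_esn->seq, just after the wrap   */

	uint32_t diff = ~old_seq + new_seq + 1;	/* as in the patch */

	printf("diff  = %u\n", diff);			/* 32 */
	printf("check = %u\n", new_seq - old_seq);	/* also 32, mod 2^32 */
	return 0;
}

Both lines print 32, so the comparison against replay_maxdiff keeps working across the wrap; the hunk at the end also switches the ESN replay ops from the bitmap notifier to this ESN-aware one.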