linux-hardened/drivers/net/wireless/mediatek/mt76/tx.c
David S. Miller e69fbf31ca wireless-drivers-next patches for 4.21

Merge tag 'wireless-drivers-next-for-davem-2018-12-20' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Kalle Valo says:

====================
wireless-drivers-next patches for 4.21

Last set of patches for 4.21. mt76 is still under very active development,
with ongoing refactoring as well as new features. Other drivers also
received a few new features and fixes.

Major changes:

ath10k

* add amsdu support for QCA6174 monitor mode

* report tx rate using the new ieee80211_tx_rate_update() API

* wcn3990 support is not experimental anymore

iwlwifi

* support for FW version 43 for 9000 and 22000 series

brcmfmac

* add support for CYW43012 SDIO chipset

* add the raw 4354 PCIe device ID for unprogrammed Cypress boards

mwifiex

* add NL80211_STA_INFO_RX_BITRATE support

mt76

* use the same firmware for mt76x2e and mt76x2u

* mt76x0e survey support

* more unification between mt76x2 and mt76x0

* mt76x0e AP mode support

* mt76x0e DFS support

* rework and fix tx status handling for mt76x0 and mt76x2
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
2018-12-20 16:47:10 -08:00


/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mt76.h"
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	int size;

	size = (sizeof(*t) + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	t = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
	if (!t)
		return NULL;

	addr = dma_map_single(dev->dev, &t->txwi, sizeof(t->txwi),
			      DMA_TO_DEVICE);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock_bh(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock_bh(&dev->lock);

	return t;
}

struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock_bh(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock_bh(&dev->lock);
}

void mt76_tx_free(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	while ((t = __mt76_get_txwi(dev)) != NULL)
		dma_unmap_single(dev->dev, t->dma_addr, sizeof(t->txwi),
				 DMA_TO_DEVICE);
}

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
	if (!txq->sta)
		return MT_TXQ_BE;

	return txq->ac;
}
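
/*
 * Record the sequence number following this frame as the aggregation
 * window ssn, so that a BlockAckReq with the right ssn can be sent when
 * the session's queue is flushed (see mt76_txq_schedule_list()).
 */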
static void
mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    !ieee80211_is_data_present(hdr->frame_control))
		return;

	mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}
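
/*
 * Frames awaiting a hardware tx status report sit on dev->status_list.
 * Callers bracket status processing with mt76_tx_status_lock() /
 * mt76_tx_status_unlock(); finished frames are collected on a private
 * list and only reported to mac80211 after the lock has been dropped.
 */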
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
		    __acquires(&dev->status_list.lock)
{
	__skb_queue_head_init(list);
	spin_lock_bh(&dev->status_list.lock);
	__acquire(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
		      __releases(&dev->status_list.unlock)
{
	struct sk_buff *skb;

	spin_unlock_bh(&dev->status_list.lock);
	__release(&dev->status_list.unlock);

	while ((skb = __skb_dequeue(list)) != NULL)
		ieee80211_tx_status(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);
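
/*
 * A frame is only finished once both the DMA completion and the tx
 * status report have been seen: cb->flags accumulates both events, and
 * the skb stays on status_list until the two MT_TX_CB_*_DONE bits are
 * set.
 */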
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
			  struct sk_buff_head *list)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

	flags |= cb->flags;
	cb->flags = flags;

	if ((flags & done) != done)
		return;

	__skb_unlink(skb, &dev->status_list);

	/* Tx status can be unreliable. If it fails, mark the frame as ACKed */
	if (flags & MT_TX_CB_TXS_FAILED) {
		ieee80211_tx_info_clear_status(info);
		info->status.rates[0].idx = -1;
		info->flags |= IEEE80211_TX_STAT_ACK;
	}

	__skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
			struct sk_buff_head *list)
{
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);
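
/*
 * Assign the next packet id for this wcid (skipping the values reserved
 * for untracked and no-ack frames) and queue the frame on status_list
 * so the hardware tx status report can be matched back to it later.
 * Returns 0 for frames that need no status tracking.
 */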
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
		       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
	int pid;

	if (!wcid)
		return 0;

	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return MT_PACKET_ID_NO_ACK;

	if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
			     IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
		return 0;

	spin_lock_bh(&dev->status_list.lock);

	memset(cb, 0, sizeof(*cb));
	wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
	if (!wcid->packet_id || wcid->packet_id == MT_PACKET_ID_NO_ACK)
		wcid->packet_id = 1;

	pid = wcid->packet_id;
	cb->wcid = wcid->idx;
	cb->pktid = pid;
	cb->jiffies = jiffies;

	__skb_queue_tail(&dev->status_list, skb);
	spin_unlock_bh(&dev->status_list.lock);

	return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);
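
/*
 * Match a tx status report to a queued frame by wcid and packet id.
 * Older non-matching entries for the same wcid are completed as failed
 * (their reports are assumed lost); with pktid == 0, only entries older
 * than MT_TX_STATUS_SKB_TIMEOUT are expired.
 */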
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
		       struct sk_buff_head *list)
{
	struct sk_buff *skb, *tmp;

	if (pktid == MT_PACKET_ID_NO_ACK)
		return NULL;

	skb_queue_walk_safe(&dev->status_list, skb, tmp) {
		struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

		if (wcid && cb->wcid != wcid->idx)
			continue;

		if (cb->pktid == pktid)
			return skb;

		if (!pktid &&
		    !time_after(jiffies, cb->jiffies + MT_TX_STATUS_SKB_TIMEOUT))
			continue;

		__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
						    MT_TX_CB_TXS_DONE, list);
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

void
mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
{
	struct sk_buff_head list;

	mt76_tx_status_lock(dev, &list);
	mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);
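
/*
 * Called on DMA completion. Frames that were never put on status_list
 * (skb->prev is NULL) are freed immediately; all others record
 * MT_TX_CB_DMA_DONE and complete once the matching status report has
 * also arrived.
 */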
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
{
	struct sk_buff_head list;

	if (!skb->prev) {
		ieee80211_free_txskb(dev->hw, skb);
		return;
	}

	mt76_tx_status_lock(dev, &list);
	__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
	mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);
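
/*
 * Main transmit entry point: select the hardware queue from the skb
 * queue mapping, fill in default rates when no per-wcid rate is set,
 * track the aggregation ssn for QoS data frames, and stop the mac80211
 * queue when fewer than 8 descriptors remain free.
 */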
void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
	struct mt76_wcid *wcid, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
	struct mt76_queue *q;
	int qid = skb_get_queue_mapping(skb);

	if (WARN_ON(qid >= MT_TXQ_PSD)) {
		qid = MT_TXQ_BE;
		skb_set_queue_mapping(skb, qid);
	}

	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(info->control.vif, sta, skb,
				       info->control.rates, 1);

	if (sta && ieee80211_is_data_qos(hdr->frame_control)) {
		struct ieee80211_txq *txq;
		struct mt76_txq *mtxq;
		u8 tid;

		tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
		txq = sta->txq[tid];
		mtxq = (struct mt76_txq *) txq->drv_priv;

		if (mtxq->aggr)
			mt76_check_agg_ssn(mtxq, skb);
	}

	q = &dev->q_tx[qid];

	spin_lock_bh(&q->lock);
	dev->queue_ops->tx_queue_skb(dev, q, skb, wcid, sta);
	dev->queue_ops->kick(dev, q);

	if (q->queued > q->ndesc - 8)
		ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
	spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);
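
/*
 * Fetch the next frame for a txq, draining the software retry queue
 * before asking mac80211 for fresh frames.
 */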
static struct sk_buff *
mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct sk_buff *skb;

	skb = skb_dequeue(&mtxq->retry_q);
	if (skb) {
		u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

		if (ps && skb_queue_empty(&mtxq->retry_q))
			ieee80211_sta_set_buffered(txq->sta, tid, false);

		return skb;
	}

	skb = ieee80211_tx_dequeue(dev->hw, txq);
	if (!skb)
		return NULL;

	return skb;
}
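
/*
 * Queue a powersave response frame on the PSD hardware queue, with the
 * more-data bit set on all but the last frame and EOSP flagged on the
 * last one.
 */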
static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
		  struct sk_buff *skb, bool last)
{
	struct mt76_wcid *wcid = (struct mt76_wcid *) sta->drv_priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];

	info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
	if (last)
		info->flags |= IEEE80211_TX_STATUS_EOSP;

	mt76_skb_set_moredata(skb, !last);
	dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, sta);
}
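
/*
 * mac80211 callback for releasing frames buffered for a station in
 * powersave: pull up to nframes frames from the requested tids and
 * deliver them as one service period.
 */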
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
			     u16 tids, int nframes,
			     enum ieee80211_frame_release_type reason,
			     bool more_data)
{
	struct mt76_dev *dev = hw->priv;
	struct sk_buff *last_skb = NULL;
	struct mt76_queue *hwq = &dev->q_tx[MT_TXQ_PSD];
	int i;

	spin_lock_bh(&hwq->lock);
	for (i = 0; tids && nframes; i++, tids >>= 1) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
		struct sk_buff *skb;

		if (!(tids & 1))
			continue;

		do {
			skb = mt76_txq_dequeue(dev, mtxq, true);
			if (!skb)
				break;

			if (mtxq->aggr)
				mt76_check_agg_ssn(mtxq, skb);

			nframes--;
			if (last_skb)
				mt76_queue_ps_skb(dev, sta, last_skb, false);

			last_skb = skb;
		} while (nframes);
	}

	if (last_skb) {
		mt76_queue_ps_skb(dev, sta, last_skb, true);
		dev->queue_ops->kick(dev, hwq);
	}
	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);
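
/*
 * Transmit a burst of frames from a single txq, reusing the rate
 * selection of the first frame for the rest. A-MPDU bursts are capped
 * at 16 frames, non-aggregated ones at 3, and rate-control probe
 * frames are always sent alone.
 */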
static int
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_queue *hwq,
		    struct mt76_txq *mtxq, bool *empty)
{
	struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
	struct ieee80211_tx_info *info;
	struct mt76_wcid *wcid = mtxq->wcid;
	struct sk_buff *skb;
	int n_frames = 1, limit;
	struct ieee80211_tx_rate tx_rate;
	bool ampdu;
	bool probe;
	int idx;

	skb = mt76_txq_dequeue(dev, mtxq, false);
	if (!skb) {
		*empty = true;
		return 0;
	}

	info = IEEE80211_SKB_CB(skb);
	if (!wcid->tx_rate_set)
		ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
				       info->control.rates, 1);
	tx_rate = info->control.rates[0];

	probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
	ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
	limit = ampdu ? 16 : 3;

	if (ampdu)
		mt76_check_agg_ssn(mtxq, skb);

	idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid, txq->sta);
	if (idx < 0)
		return idx;

	do {
		bool cur_ampdu;

		if (probe)
			break;

		if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
		    test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		skb = mt76_txq_dequeue(dev, mtxq, false);
		if (!skb) {
			*empty = true;
			break;
		}

		info = IEEE80211_SKB_CB(skb);
		cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

		if (ampdu != cur_ampdu ||
		    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
			skb_queue_tail(&mtxq->retry_q, skb);
			break;
		}

		info->control.rates[0] = tx_rate;

		if (cur_ampdu)
			mt76_check_agg_ssn(mtxq, skb);

		idx = dev->queue_ops->tx_queue_skb(dev, hwq, skb, wcid,
						   txq->sta);
		if (idx < 0)
			return idx;

		n_frames++;
	} while (n_frames < limit);

	if (!probe) {
		hwq->swq_queued++;
		hwq->entry[idx].schedule = true;
	}

	dev->queue_ops->kick(dev, hwq);

	return n_frames;
}
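
/*
 * Round-robin pass over the txqs queued on a hardware queue: each txq
 * gets one burst, with a pending BlockAckReq sent first whenever an
 * aggregation session was flagged by mt76_stop_tx_queues().
 */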
static int
mt76_txq_schedule_list(struct mt76_dev *dev, struct mt76_queue *hwq)
{
	struct mt76_txq *mtxq, *mtxq_last;
	int len = 0;

restart:
	mtxq_last = list_last_entry(&hwq->swq, struct mt76_txq, list);
	while (!list_empty(&hwq->swq)) {
		bool empty = false;
		int cur;

		if (test_bit(MT76_OFFCHANNEL, &dev->state) ||
		    test_bit(MT76_RESET, &dev->state))
			return -EBUSY;

		mtxq = list_first_entry(&hwq->swq, struct mt76_txq, list);
		if (mtxq->send_bar && mtxq->aggr) {
			struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
			struct ieee80211_sta *sta = txq->sta;
			struct ieee80211_vif *vif = txq->vif;
			u16 agg_ssn = mtxq->agg_ssn;
			u8 tid = txq->tid;

			mtxq->send_bar = false;
			spin_unlock_bh(&hwq->lock);
			ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
			spin_lock_bh(&hwq->lock);
			goto restart;
		}

		list_del_init(&mtxq->list);

		cur = mt76_txq_send_burst(dev, hwq, mtxq, &empty);
		if (!empty)
			list_add_tail(&mtxq->list, &hwq->swq);

		if (cur < 0)
			return cur;

		len += cur;

		if (mtxq == mtxq_last)
			break;
	}

	return len;
}

void mt76_txq_schedule(struct mt76_dev *dev, struct mt76_queue *hwq)
{
	int len;

	rcu_read_lock();
	do {
		if (hwq->swq_queued >= 4 || list_empty(&hwq->swq))
			break;

		len = mt76_txq_schedule_list(dev, hwq);
	} while (len > 0);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_dev *dev)
{
	int i;

	for (i = 0; i <= MT_TXQ_BK; i++) {
		struct mt76_queue *q = &dev->q_tx[i];

		spin_lock_bh(&q->lock);
		mt76_txq_schedule(dev, q);
		spin_unlock_bh(&q->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);
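
/*
 * Remove all of a station's txqs from scheduling, optionally marking
 * active aggregation sessions so that a BlockAckReq is sent on the
 * next scheduling pass.
 */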
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
			 bool send_bar)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct ieee80211_txq *txq = sta->txq[i];
		struct mt76_txq *mtxq;

		if (!txq)
			continue;

		mtxq = (struct mt76_txq *)txq->drv_priv;

		spin_lock_bh(&mtxq->hwq->lock);
		mtxq->send_bar = mtxq->aggr && send_bar;
		if (!list_empty(&mtxq->list))
			list_del_init(&mtxq->list);
		spin_unlock_bh(&mtxq->hwq->lock);
	}
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct mt76_dev *dev = hw->priv;
	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;
	struct mt76_queue *hwq = mtxq->hwq;

	spin_lock_bh(&hwq->lock);
	if (list_empty(&mtxq->list))
		list_add_tail(&mtxq->list, &hwq->swq);
	mt76_txq_schedule(dev, hwq);
	spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq;
	struct mt76_queue *hwq;
	struct sk_buff *skb;

	if (!txq)
		return;

	mtxq = (struct mt76_txq *) txq->drv_priv;
	hwq = mtxq->hwq;

	spin_lock_bh(&hwq->lock);
	if (!list_empty(&mtxq->list))
		list_del_init(&mtxq->list);
	spin_unlock_bh(&hwq->lock);

	while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
		ieee80211_free_txskb(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);

void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
	struct mt76_txq *mtxq = (struct mt76_txq *) txq->drv_priv;

	INIT_LIST_HEAD(&mtxq->list);
	skb_queue_head_init(&mtxq->retry_q);

	mtxq->hwq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);
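
/*
 * Map a mac80211 access category to the hardware queue index used by
 * the WMM queues.
 */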
u8 mt76_ac_to_hwq(u8 ac)
{
	static const u8 wmm_queue_map[] = {
		[IEEE80211_AC_BE] = 0,
		[IEEE80211_AC_BK] = 1,
		[IEEE80211_AC_VI] = 2,
		[IEEE80211_AC_VO] = 3,
	};

	if (WARN_ON(ac >= IEEE80211_NUM_ACS))
		return 0;

	return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);