Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm/tx.c')
 drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 342
 1 file changed, 298 insertions(+), 44 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 34731e29c..a0c1e3d07 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -67,6 +67,7 @@
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <net/ip.h>
+#include <net/ipv6.h>
#include "iwl-trans.h"
#include "iwl-eeprom-parse.h"
@@ -98,6 +99,102 @@ iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
addr, tid, ssn);
}
+#define OPT_HDR(type, skb, off) \
+ (type *)(skb_network_header(skb) + (off))
+
+static void iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_tx_info *info,
+ struct iwl_tx_cmd *tx_cmd)
+{
+#if IS_ENABLED(CONFIG_INET)
+ u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
+ u16 offload_assist = le16_to_cpu(tx_cmd->offload_assist);
+ u8 protocol = 0;
+
+ /*
+ * Do not compute checksum if already computed or if transport will
+ * compute it
+ */
+ if (skb->ip_summed != CHECKSUM_PARTIAL || IWL_MVM_SW_TX_CSUM_OFFLOAD)
+ return;
+
+ /* We do not expect to be requested to csum stuff we do not support */
+ if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
+ (skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6)),
+ "No support for requested checksum\n")) {
+ skb_checksum_help(skb);
+ return;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ protocol = ip_hdr(skb)->protocol;
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6hdr *ipv6h =
+ (struct ipv6hdr *)skb_network_header(skb);
+ unsigned int off = sizeof(*ipv6h);
+
+ protocol = ipv6h->nexthdr;
+ while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
+ struct ipv6_opt_hdr *hp;
+
+ /* only supported extension headers */
+ if (protocol != NEXTHDR_ROUTING &&
+ protocol != NEXTHDR_HOP &&
+ protocol != NEXTHDR_DEST) {
+ skb_checksum_help(skb);
+ return;
+ }
+
+ hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
+ protocol = hp->nexthdr;
+ off += ipv6_optlen(hp);
+ }
+ /* if we get here, protocol should now be TCP or UDP */
+#endif
+ }
+
+ if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
+ WARN_ON_ONCE(1);
+ skb_checksum_help(skb);
+ return;
+ }
+
+ /* enable L4 csum */
+ offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
+
+ /*
+ * Set offset to IP header (snap).
+ * We don't support tunneling so no need to take care of inner header.
+ * Size is in words.
+ */
+ offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
+
+ /* Do the IPv4 header csum for A-MSDU only (IPv6 has no header csum) */
+ if (skb->protocol == htons(ETH_P_IP) &&
+ (offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) {
+ ip_hdr(skb)->check = 0;
+ offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
+ }
+
+ /* reset UDP/TCP header csum */
+ if (protocol == IPPROTO_TCP)
+ tcp_hdr(skb)->check = 0;
+ else
+ udp_hdr(skb)->check = 0;
+
+ /* MAC header len should include IV; size is in words */
+ if (info->control.hw_key)
+ mh_len += info->control.hw_key->iv_len;
+ mh_len /= 2;
+ offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
+
+ tx_cmd->offload_assist = cpu_to_le16(offload_assist);
+#endif
+}
+
/*
* Sets most of the Tx cmd's fields
*/
@@ -127,6 +224,9 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
u8 *qc = ieee80211_get_qos_ctl(hdr);
tx_cmd->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+ if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
+ tx_cmd->offload_assist |=
+ cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU));
} else if (ieee80211_is_back_req(fc)) {
struct ieee80211_bar *bar = (void *)skb->data;
u16 control = le16_to_cpu(bar->control);
@@ -187,9 +287,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
/* Total # bytes to be transmitted */
tx_cmd->len = cpu_to_le16((u16)skb->len +
(uintptr_t)skb_info->driver_data[0]);
- tx_cmd->next_frame_len = 0;
tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
tx_cmd->sta_id = sta_id;
+
+ /* padding is inserted later in transport */
+ if (ieee80211_hdrlen(fc) % 4 &&
+ !(tx_cmd->offload_assist & cpu_to_le16(BIT(TX_CMD_OFFLD_AMSDU))))
+ tx_cmd->offload_assist |= cpu_to_le16(BIT(TX_CMD_OFFLD_PAD));
+
+ iwl_mvm_tx_csum(mvm, skb, hdr, info, tx_cmd);
}
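For reference, here is a standalone userspace sketch of how the offload_assist bits set by iwl_mvm_tx_csum() and the PAD bit set above combine for a plain QoS data frame. The bit positions follow my reading of the iwlwifi firmware API headers and should be treated as assumptions rather than firmware-confirmed values:

#include <stdio.h>
#include <stdint.h>

/* assumed bit positions, per the iwlwifi fw API as I understand it */
#define TX_CMD_OFFLD_IP_HDR   0   /* IP header offset from frame start, in 16-bit words */
#define TX_CMD_OFFLD_L4_EN    6   /* enable HW TCP/UDP checksum */
#define TX_CMD_OFFLD_L3_EN    7   /* enable HW IPv4 header checksum (A-MSDU only) */
#define TX_CMD_OFFLD_MH_SIZE  8   /* MAC header length, in 16-bit words */
#define TX_CMD_OFFLD_PAD     13   /* 2-byte pad follows the MAC header */

int main(void)
{
	unsigned int mh_len = 26;	/* QoS data, 3 addresses, no IV */
	uint16_t offload_assist = 0;

	offload_assist |= 1 << TX_CMD_OFFLD_L4_EN;	/* L4 csum in HW */
	offload_assist |= 4 << TX_CMD_OFFLD_IP_HDR;	/* 8-byte SNAP = 4 words */
	offload_assist |= (mh_len / 2) << TX_CMD_OFFLD_MH_SIZE;
	if (mh_len % 4)			/* 26 is not a multiple of 4 */
		offload_assist |= 1 << TX_CMD_OFFLD_PAD;

	printf("offload_assist = 0x%04x\n", offload_assist);	/* 0x2d44 */
	return 0;
}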
/*
@@ -245,7 +351,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
&mvm->nvm_data->bands[info->band], sta);
/* For 5 GHZ band, remap mac80211 rate indices into driver indices */
- if (info->band == IEEE80211_BAND_5GHZ)
+ if (info->band == NL80211_BAND_5GHZ)
rate_idx += IWL_FIRST_OFDM_RATE;
/* For 2.4 GHZ band, check that there is no need to remap */
@@ -258,7 +364,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm),
mvm->mgmt_last_antenna_idx);
- if (info->band == IEEE80211_BAND_2GHZ &&
+ if (info->band == NL80211_BAND_2GHZ &&
!iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
else
@@ -273,6 +379,23 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
tx_cmd->rate_n_flags = cpu_to_le32((u32)rate_plcp | rate_flags);
}
+static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
+ u8 *crypto_hdr)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+ u64 pn;
+
+ pn = atomic64_inc_return(&keyconf->tx_pn);
+ crypto_hdr[0] = pn;
+ crypto_hdr[2] = 0;
+ crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
+ crypto_hdr[1] = pn >> 8;
+ crypto_hdr[4] = pn >> 16;
+ crypto_hdr[5] = pn >> 24;
+ crypto_hdr[6] = pn >> 32;
+ crypto_hdr[7] = pn >> 40;
+}
+
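The byte scatter above is the CCMP/GCMP per-packet IV layout from IEEE 802.11: PN0/PN1 in bytes 0-1, byte 2 reserved, the Ext IV flag (0x20) plus the key index in byte 3, and PN2-PN5 in bytes 4-7. A minimal self-contained sketch of the same mapping:

#include <stdio.h>
#include <stdint.h>

/* Scatter a 48-bit packet number into the 8-byte CCMP/GCMP IV. */
static void fill_iv(uint8_t iv[8], uint64_t pn, int keyidx)
{
	iv[0] = pn;			/* PN0 */
	iv[1] = pn >> 8;		/* PN1 */
	iv[2] = 0;			/* reserved */
	iv[3] = 0x20 | (keyidx << 6);	/* Ext IV flag + key index */
	iv[4] = pn >> 16;		/* PN2 */
	iv[5] = pn >> 24;		/* PN3 */
	iv[6] = pn >> 32;		/* PN4 */
	iv[7] = pn >> 40;		/* PN5 */
}

int main(void)
{
	uint8_t iv[8];
	int i;

	fill_iv(iv, 0x0123456789ABULL, 1);
	for (i = 0; i < 8; i++)
		printf("%02x ", iv[i]);	/* ab 89 00 60 67 45 23 01 */
	printf("\n");
	return 0;
}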
/*
* Sets the fields in the Tx cmd that are crypto related
*/
@@ -290,15 +413,7 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_CCMP_256:
iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
- pn = atomic64_inc_return(&keyconf->tx_pn);
- crypto_hdr[0] = pn;
- crypto_hdr[2] = 0;
- crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
- crypto_hdr[1] = pn >> 8;
- crypto_hdr[4] = pn >> 16;
- crypto_hdr[5] = pn >> 24;
- crypto_hdr[6] = pn >> 32;
- crypto_hdr[7] = pn >> 40;
+ iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
break;
case WLAN_CIPHER_SUITE_TKIP:
@@ -318,6 +433,18 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ /* TODO: Taking the key from the table might introduce a race
+ * when PTK rekeying is done: an old packet could carry a PN
+ * based on the old key while the payload is encrypted with the
+ * new one.
+ * Need to handle this.
+ */
+ tx_cmd->sec_ctl |= TX_CMD_SEC_GCMP | TC_CMD_SEC_KEY_FROM_TABLE;
+ tx_cmd->key[0] = keyconf->hw_key_idx;
+ iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
+ break;
default:
tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
}
@@ -360,6 +487,21 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
return dev_cmd;
}
+static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
+ struct ieee80211_tx_info *info, __le16 fc)
+{
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ if (info->control.vif->type == NL80211_IFTYPE_AP &&
+ ieee80211_is_probe_resp(fc))
+ return IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+ else if (ieee80211_is_mgmt(fc) &&
+ info->control.vif->type == NL80211_IFTYPE_P2P_DEVICE)
+ return IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+ }
+
+ return info->hw_queue;
+}
+
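A toy model of this selection, runnable in userspace; the queue numbers are invented placeholders, not the real IWL_MVM_DQA_* values:

#include <stdio.h>
#include <stdbool.h>

enum vif_type { VIF_AP, VIF_P2P_DEVICE, VIF_STATION };

#define TOY_AP_PROBE_RESP_QUEUE	9	/* placeholder value */
#define TOY_P2P_DEVICE_QUEUE	2	/* placeholder value */

static int ctrl_vif_queue(bool dqa_supported, enum vif_type type,
			  bool is_probe_resp, bool is_mgmt, int hw_queue)
{
	if (dqa_supported) {
		if (type == VIF_AP && is_probe_resp)
			return TOY_AP_PROBE_RESP_QUEUE;
		if (is_mgmt && type == VIF_P2P_DEVICE)
			return TOY_P2P_DEVICE_QUEUE;
	}
	return hw_queue;	/* mac80211's original choice stands */
}

int main(void)
{
	printf("DQA, AP probe resp -> queue %d\n",
	       ctrl_vif_queue(true, VIF_AP, true, true, 5));
	printf("no DQA             -> queue %d\n",
	       ctrl_vif_queue(false, VIF_AP, true, true, 5));
	return 0;
}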
int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -369,6 +511,16 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
struct iwl_tx_cmd *tx_cmd;
u8 sta_id;
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
+ int queue;
+
+ /* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be sent
+ * from two different types of vifs, P2P & STATION. P2P uses the
+ * offchannel queue. STATION (HS2.0) uses the auxiliary context of the
+ * FW, and hence needs to be sent on the aux queue.
+ */
+ if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
+ skb_info->control.vif->type == NL80211_IFTYPE_STATION)
+ IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
memcpy(&info, skb->cb, sizeof(info));
@@ -383,15 +535,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
/* This holds the amsdu headers length */
skb_info->driver_data[0] = (void *)(uintptr_t)0;
- /*
- * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
- * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
- * queue. STATION (HS2.0) uses the auxiliary context of the FW,
- * and hence needs to be sent on the aux queue
- */
- if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
- info.control.vif->type == NL80211_IFTYPE_STATION)
- IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
+ queue = info.hw_queue;
/*
* If the interface on which the frame is sent is the P2P_DEVICE
@@ -401,6 +545,9 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
* (this is not possible for unicast packets as a TDLS discovery
* response is sent without a station entry); otherwise use the
* AUX station.
+ * In DQA mode, if vif is of type STATION and frames are not multicast,
+ * they should be sent from the BSS queue. For example, TDLS setup
+ * frames should be sent on this queue, as they go through the AP.
*/
sta_id = mvm->aux_sta.sta_id;
if (info.control.vif) {
@@ -408,18 +555,23 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
iwl_mvm_vif_from_mac80211(info.control.vif);
if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
- info.control.vif->type == NL80211_IFTYPE_AP)
+ info.control.vif->type == NL80211_IFTYPE_AP) {
sta_id = mvmvif->bcast_sta.sta_id;
- else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
- is_multicast_ether_addr(hdr->addr1)) {
+ queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info,
+ hdr->frame_control);
+ } else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
+ is_multicast_ether_addr(hdr->addr1)) {
u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id);
if (ap_sta_id != IWL_MVM_STATION_COUNT)
sta_id = ap_sta_id;
+ } else if (iwl_mvm_is_dqa_supported(mvm) &&
+ info.control.vif->type == NL80211_IFTYPE_STATION) {
+ queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
}
}
- IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, info.hw_queue);
+ IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id);
if (!dev_cmd)
@@ -430,7 +582,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);
- if (iwl_trans_tx(mvm->trans, skb, dev_cmd, info.hw_queue)) {
+ if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
return -1;
}
@@ -463,6 +615,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
u16 amsdu_add, snap_ip_tcp, pad, i = 0;
unsigned int dbg_max_amsdu_len;
+ netdev_features_t netdev_features = NETIF_F_CSUM_MASK | NETIF_F_SG;
u8 *qc, tid, txf;
snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) +
@@ -473,15 +626,30 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
return -EINVAL;
+ dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
+
if (!sta->max_amsdu_len ||
!ieee80211_is_data_qos(hdr->frame_control) ||
- !mvmsta->tlc_amsdu) {
+ (!mvmsta->tlc_amsdu && !dbg_max_amsdu_len)) {
num_subframes = 1;
pad = 0;
goto segment;
}
/*
+ * Do not build AMSDU for IPv6 with extension headers.
+ * Ask the stack to segment and checksum the generated MPDUs for us.
+ */
+ if (skb->protocol == htons(ETH_P_IPV6) &&
+ ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
+ IPPROTO_TCP) {
+ num_subframes = 1;
+ pad = 0;
+ netdev_features &= ~NETIF_F_CSUM_MASK;
+ goto segment;
+ }
+
+ /*
* No need to lock amsdu_in_ampdu_allowed since it can't be modified
* during a BA session.
*/
@@ -493,7 +661,6 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
}
max_amsdu_len = sta->max_amsdu_len;
- dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len);
/* the Tx FIFO to which this A-MSDU will be routed */
txf = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
@@ -507,7 +674,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
mvm->shared_mem_cfg.txfifo_size[txf] - 256);
- if (dbg_max_amsdu_len)
+ if (unlikely(dbg_max_amsdu_len))
max_amsdu_len = min_t(unsigned int, max_amsdu_len,
dbg_max_amsdu_len);
@@ -575,7 +742,7 @@ segment:
skb_shinfo(skb)->gso_size = num_subframes * mss;
memcpy(cb, skb->cb, sizeof(cb));
- next = skb_gso_segment(skb, NETIF_F_CSUM_MASK | NETIF_F_SG);
+ next = skb_gso_segment(skb, netdev_features);
skb_shinfo(skb)->gso_size = mss;
if (WARN_ON_ONCE(IS_ERR(next)))
return -EINVAL;
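The gso_size juggling above is the heart of the software A-MSDU path: inflating gso_size makes skb_gso_segment() emit one skb per A-MSDU instead of one per MSS. A sketch of the arithmetic, with illustrative numbers only:

#include <stdio.h>

int main(void)
{
	unsigned int mss = 1460;		/* TCP payload per subframe */
	unsigned int snap_ip_tcp = 8 + 20 + 20;	/* SNAP + IPv4 + TCP headers */
	unsigned int max_amsdu_len = 7935;	/* e.g. a VHT A-MSDU limit */
	unsigned int num_subframes = max_amsdu_len / (mss + snap_ip_tcp);

	printf("subframes per A-MSDU: %u\n", num_subframes);	   /* 5 */
	printf("inflated gso_size   : %u\n", num_subframes * mss); /* 7300 */
	return 0;
}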
@@ -641,6 +808,35 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
}
#endif
+static void iwl_mvm_tx_add_stream(struct iwl_mvm *mvm,
+ struct iwl_mvm_sta *mvm_sta, u8 tid,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ u8 mac_queue = info->hw_queue;
+ struct sk_buff_head *deferred_tx_frames;
+
+ lockdep_assert_held(&mvm_sta->lock);
+
+ mvm_sta->deferred_traffic_tid_map |= BIT(tid);
+ set_bit(mvm_sta->sta_id, mvm->sta_deferred_frames);
+
+ deferred_tx_frames = &mvm_sta->tid_data[tid].deferred_tx_frames;
+
+ skb_queue_tail(deferred_tx_frames, skb);
+
+ /*
+ * The first deferred frame should've stopped the MAC queues, so we
+ * should never get a second deferred frame for the RA/TID.
+ */
+ if (!WARN(skb_queue_len(deferred_tx_frames) != 1,
+ "RATID %d/%d has %d deferred frames\n", mvm_sta->sta_id, tid,
+ skb_queue_len(deferred_tx_frames))) {
+ iwl_mvm_stop_mac_queues(mvm, BIT(mac_queue));
+ schedule_work(&mvm->add_stream_wk);
+ }
+}
+
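A minimal sketch of the defer-then-drain bookkeeping this helper performs; every name and type below is invented for illustration:

#include <stdio.h>
#include <stdint.h>

struct toy_sta {
	uint16_t deferred_tid_map;	/* one bit per TID with parked traffic */
	unsigned int deferred_frames;	/* stand-in for the skb queue length */
};

static void defer_frame(struct toy_sta *sta, uint32_t *sta_bitmap,
			int sta_id, int tid)
{
	sta->deferred_tid_map |= 1u << tid;
	*sta_bitmap |= 1u << sta_id;	/* tells the worker which STA to serve */
	sta->deferred_frames++;

	if (sta->deferred_frames == 1)
		printf("stop MAC queue, schedule add-stream worker\n");
	else
		printf("WARN: second deferred frame for this RA/TID\n");
}

int main(void)
{
	struct toy_sta sta = { 0 };
	uint32_t sta_bitmap = 0;

	defer_frame(&sta, &sta_bitmap, 3, 5);	/* first frame: queue stops */
	defer_frame(&sta, &sta_bitmap, 3, 5);	/* should never happen */
	return 0;
}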
/*
 * Prepares the Tx command, then builds and sends a single MPDU
 */
*/
@@ -656,7 +852,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
u16 seq_number = 0;
u8 tid = IWL_MAX_TID_COUNT;
u8 txq_id = info->hw_queue;
- bool is_data_qos = false, is_ampdu = false;
+ bool is_ampdu = false;
int hdrlen;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
@@ -697,8 +893,21 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
seq_number &= IEEE80211_SCTL_SEQ;
hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
hdr->seq_ctrl |= cpu_to_le16(seq_number);
- is_data_qos = true;
is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
+ } else if (iwl_mvm_is_dqa_supported(mvm) &&
+ (ieee80211_is_qos_nullfunc(fc) ||
+ ieee80211_is_nullfunc(fc))) {
+ /*
+ * nullfunc frames should go to the MGMT queue regardless of QoS
+ */
+ tid = IWL_MAX_TID_COUNT;
+ }
+
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ txq_id = mvmsta->tid_data[tid].txq_id;
+
+ if (ieee80211_is_mgmt(fc))
+ tx_cmd->tid_tspec = IWL_TID_NON_QOS;
}
/* Copy MAC header from skb into command buffer */
@@ -706,7 +915,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
- if (sta->tdls) {
+ if (sta->tdls && !iwl_mvm_is_dqa_supported(mvm)) {
/* default to TID 0 for non-QoS packets */
u8 tdls_tid = tid == IWL_MAX_TID_COUNT ? 0 : tid;
@@ -719,18 +928,50 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
txq_id = mvmsta->tid_data[tid].txq_id;
}
+ /* Check if TXQ needs to be allocated or re-activated */
+ if (unlikely(txq_id == IEEE80211_INVAL_HW_QUEUE ||
+ !mvmsta->tid_data[tid].is_tid_active) &&
+ iwl_mvm_is_dqa_supported(mvm)) {
+ /* If TXQ needs to be allocated... */
+ if (txq_id == IEEE80211_INVAL_HW_QUEUE) {
+ iwl_mvm_tx_add_stream(mvm, mvmsta, tid, skb);
+
+ /*
+ * The frame is now deferred, and the scheduled worker will
+ * retransmit it with a fresh Tx command, so we can free this
+ * one for now.
+ */
+ iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
+ spin_unlock(&mvmsta->lock);
+ return 0;
+ }
+
+ /* If we are here - TXQ exists and needs to be re-activated */
+ spin_lock(&mvm->queue_info_lock);
+ mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
+ mvmsta->tid_data[tid].is_tid_active = true;
+ spin_unlock(&mvm->queue_info_lock);
+
+ IWL_DEBUG_TX_QUEUES(mvm, "Re-activating queue %d for TX\n",
+ txq_id);
+ }
+
+ /* Keep track of the time of the last frame for this RA/TID */
+ mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
+
IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x\n", mvmsta->sta_id,
tid, txq_id, IEEE80211_SEQ_TO_SN(seq_number));
if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
goto drop_unlock_sta;
- if (is_data_qos && !ieee80211_has_morefrags(fc))
+ if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
spin_unlock(&mvmsta->lock);
- if (txq_id < mvm->first_agg_queue)
+ /* Increase pending frames count if this isn't AMPDU */
+ if (!is_ampdu)
atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
return 0;
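The seq_number + 0x10 step above advances the 802.11 sequence number by exactly one: seq_ctrl keeps the fragment number in bits 0-3 and the sequence number in bits 4-15. A self-contained demonstration:

#include <stdio.h>

#define IEEE80211_SCTL_FRAG 0x000F
#define IEEE80211_SCTL_SEQ  0xFFF0
#define IEEE80211_SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)

int main(void)
{
	unsigned int seq_ctrl = (100 << 4) | 2;	/* SN 100, fragment 2 */

	printf("SN %u, frag %u\n", IEEE80211_SEQ_TO_SN(seq_ctrl),
	       seq_ctrl & IEEE80211_SCTL_FRAG);

	/* what the driver stores for the next frame: SN + 1, frag bits clear */
	seq_ctrl = (seq_ctrl + 0x10) & IEEE80211_SCTL_SEQ;
	printf("next seq_ctrl 0x%04x (SN %u)\n", seq_ctrl,
	       IEEE80211_SEQ_TO_SN(seq_ctrl));
	return 0;
}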
@@ -883,7 +1124,7 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status)
#endif /* CONFIG_IWLWIFI_DEBUG */
void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
- enum ieee80211_band band,
+ enum nl80211_band band,
struct ieee80211_tx_rate *r)
{
if (rate_n_flags & RATE_HT_MCS_GF_MSK)
@@ -978,6 +1219,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
u8 skb_freed = 0;
u16 next_reclaimed, seq_ctl;
bool is_ndp = false;
+ bool txq_agg = false; /* Is this TXQ aggregated */
__skb_queue_head_init(&skbs);
@@ -1108,6 +1350,16 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
bool send_eosp_ndp = false;
spin_lock_bh(&mvmsta->lock);
+ if (iwl_mvm_is_dqa_supported(mvm)) {
+ enum iwl_mvm_agg_state state;
+
+ state = mvmsta->tid_data[tid].state;
+ txq_agg = (state == IWL_AGG_ON ||
+ state == IWL_EMPTYING_HW_QUEUE_DELBA);
+ } else {
+ txq_agg = txq_id >= mvm->first_agg_queue;
+ }
+
if (!is_ndp) {
tid_data->next_reclaimed = next_reclaimed;
IWL_DEBUG_TX_REPLY(mvm,
@@ -1163,11 +1415,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
* If the txq is not an AMPDU queue, there is no chance we freed
* several skbs. Check that out...
*/
- if (txq_id >= mvm->first_agg_queue)
+ if (txq_agg)
goto out;
/* We can't free more than one frame at once on a shared queue */
- WARN_ON(skb_freed > 1);
+ WARN_ON(!iwl_mvm_is_dqa_supported(mvm) && (skb_freed > 1));
/* If there are still frames pending for this STA, nothing more to do here */
if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
@@ -1261,9 +1513,12 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
+ int queue = SEQ_TO_QUEUE(sequence);
- if (WARN_ON_ONCE(SEQ_TO_QUEUE(sequence) < mvm->first_agg_queue))
+ if (WARN_ON_ONCE(queue < mvm->first_agg_queue &&
+ (!iwl_mvm_is_dqa_supported(mvm) ||
+ (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE))))
return;
if (WARN_ON_ONCE(tid == IWL_TID_NON_QOS))
@@ -1273,10 +1528,9 @@ static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
rcu_read_lock();
- sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
- if (!WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
- struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ if (!WARN_ON_ONCE(!mvmsta)) {
mvmsta->tid_data[tid].rate_n_flags =
le32_to_cpu(tx_resp->initial_rate);
mvmsta->tid_data[tid].tx_time =
@@ -1435,7 +1689,7 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
iwl_mvm_tx_info_from_ba_notif(&ba_info, ba_notif, tid_data);
IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
- iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info);
+ iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info, false);
}
out: