Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm/sta.c')
-rw-r--r-- | drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 1189 |
1 file changed, 1127 insertions, 62 deletions
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index ef99942d7..e933c12d8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -111,7 +111,7 @@ static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
 /* send station add/update command to firmware */
 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			   bool update)
+			   bool update, unsigned int flags)
 {
 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
@@ -126,9 +126,12 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	u32 status;
 	u32 agg_size = 0, mpdu_dens = 0;
 
-	if (!update) {
+	if (!update || (flags & STA_MODIFY_QUEUES)) {
 		add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
 		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);
+
+		if (flags & STA_MODIFY_QUEUES)
+			add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
 	}
 
 	switch (sta->bandwidth) {
@@ -220,6 +223,39 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 	return ret;
 }
 
+static void iwl_mvm_rx_agg_session_expired(unsigned long data)
+{
+	struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data;
+	struct iwl_mvm_baid_data *ba_data;
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvm_sta;
+	unsigned long timeout;
+
+	rcu_read_lock();
+
+	ba_data = rcu_dereference(*rcu_ptr);
+
+	if (WARN_ON(!ba_data))
+		goto unlock;
+
+	if (!ba_data->timeout)
+		goto unlock;
+
+	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
+	if (time_is_after_jiffies(timeout)) {
+		mod_timer(&ba_data->session_timer, timeout);
+		goto unlock;
+	}
+
+	/* Timer expired */
+	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);
+	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
+	ieee80211_stop_rx_ba_session_offl(mvm_sta->vif,
+					  sta->addr, ba_data->tid);
+unlock:
+	rcu_read_unlock();
+}
+
 static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm,
 				 struct ieee80211_sta *sta)
 {
@@ -274,6 +310,657 @@ static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm,
 		iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0);
 }
 
+/* Disable aggregations for a bitmap of TIDs for a given station */
+static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
+					unsigned long disable_agg_tids,
+					bool remove_queue)
+{
+	struct iwl_mvm_add_sta_cmd cmd = {};
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	u32 status;
+	u8 sta_id;
+	int ret;
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	sta_id = mvm->queue_info[queue].ra_sta_id;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	rcu_read_lock();
+
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+	mvmsta->tid_disable_agg |= disable_agg_tids;
+
+	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
+	cmd.sta_id = mvmsta->sta_id;
+	cmd.add_modify = STA_MODE_MODIFY;
+	cmd.modify_mask = STA_MODIFY_QUEUES;
+	if (disable_agg_tids)
+		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
+	if (remove_queue)
+		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
+	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
+	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);
+
+	rcu_read_unlock();
+
+	/* Notify FW of queue removal from the STA queues */
+	status = ADD_STA_SUCCESS;
+	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
+					  iwl_mvm_add_sta_cmd_size(mvm),
+					  &cmd, &status);
+
+	return ret;
+}
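Aside: iwl_mvm_rx_agg_session_expired() above implements a rolling timeout - the RX path only stamps last_rx, and the timer callback decides on expiry whether the session was really idle for twice the BA timeout, re-arming itself otherwise. Below is a minimal userspace sketch of that pattern; tu_to_jiffies(), ba_session and ba_session_expired() are illustrative stand-ins for the driver's TU_TO_JIFFIES() and iwl_mvm_baid_data, not driver API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for jiffies and TU_TO_JIFFIES() (1 TU = 1024 us) */
typedef unsigned long jiffies_t;
#define HZ 250UL
static jiffies_t tu_to_jiffies(unsigned long tu)
{
	return (tu * 1024UL * HZ) / 1000000UL;
}

struct ba_session {
	jiffies_t last_rx;      /* updated by the RX path on every frame */
	unsigned long timeout;  /* BA timeout in TUs; 0 = never expire */
};

/*
 * Returns true if the session really expired; otherwise *rearm_at is set
 * to the jiffy at which the timer should fire next (last_rx + 2*timeout),
 * mirroring the check-and-mod_timer logic in the callback above.
 */
static bool ba_session_expired(const struct ba_session *s, jiffies_t now,
			       jiffies_t *rearm_at)
{
	jiffies_t deadline;

	if (!s->timeout)
		return false; /* no timeout configured, nothing to do */

	deadline = s->last_rx + tu_to_jiffies(s->timeout * 2);
	if (now < deadline) {
		*rearm_at = deadline; /* traffic seen recently, push out */
		return false;
	}
	return true; /* genuinely idle: tear down the RX BA session */
}

int main(void)
{
	struct ba_session s = { .last_rx = 1000, .timeout = 5000 };
	jiffies_t rearm = 0;

	printf("expired: %d\n", ba_session_expired(&s, 1500, &rearm));
	printf("rearm at: %lu\n", rearm);
	return 0;
}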
+
+static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
+{
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	unsigned long tid_bitmap;
+	unsigned long agg_tids = 0;
+	s8 sta_id;
+	int tid;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	sta_id = mvm->queue_info[queue].ra_sta_id;
+	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+					lockdep_is_held(&mvm->mutex));
+
+	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
+		return -EINVAL;
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+	spin_lock_bh(&mvmsta->lock);
+	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
+			agg_tids |= BIT(tid);
+	}
+	spin_unlock_bh(&mvmsta->lock);
+
+	return agg_tids;
+}
+
+/*
+ * Remove a queue from a station's resources.
+ * Note that this only marks the queue as free. It DOESN'T delete a BA
+ * agreement, and doesn't disable the queue.
+ */
+static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
+{
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	unsigned long tid_bitmap;
+	unsigned long disable_agg_tids = 0;
+	u8 sta_id;
+	int tid;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	sta_id = mvm->queue_info[queue].ra_sta_id;
+	tid_bitmap = mvm->queue_info[queue].tid_bitmap;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	rcu_read_lock();
+
+	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+
+	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
+		rcu_read_unlock();
+		return 0;
+	}
+
+	mvmsta = iwl_mvm_sta_from_mac80211(sta);
+
+	spin_lock_bh(&mvmsta->lock);
+	/* Unmap MAC queues and TIDs from this queue */
+	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
+		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
+			disable_agg_tids |= BIT(tid);
+		mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE;
+	}
+
+	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
+	spin_unlock_bh(&mvmsta->lock);
+
+	rcu_read_unlock();
+
+	spin_lock_bh(&mvm->queue_info_lock);
+	/* Unmap MAC queues and TIDs from this queue */
+	mvm->queue_info[queue].hw_queue_to_mac80211 = 0;
+	mvm->queue_info[queue].hw_queue_refcount = 0;
+	mvm->queue_info[queue].tid_bitmap = 0;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	return disable_agg_tids;
+}
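Aside: both helpers above reduce a queue's TID bitmap to the subset of TIDs whose aggregation state is IWL_AGG_ON. A compilable sketch of that filtering, with a plain loop standing in for the kernel's for_each_set_bit(); the names and values here are illustrative only:

#include <stdio.h>

#define MAX_TID_COUNT 8  /* data TIDs 0..7; the driver adds TID 8 for MGMT */

enum agg_state { AGG_OFF, AGG_ON };

/* Collect the subset of TIDs mapped to this queue that have aggs open,
 * equivalent to the for_each_set_bit() loops in the functions above. */
static unsigned long queue_agg_tids(unsigned long tid_bitmap,
				    const enum agg_state state[])
{
	unsigned long agg_tids = 0;
	int tid;

	for (tid = 0; tid <= MAX_TID_COUNT; tid++)
		if ((tid_bitmap & (1UL << tid)) && state[tid] == AGG_ON)
			agg_tids |= 1UL << tid;

	return agg_tids;
}

int main(void)
{
	enum agg_state state[MAX_TID_COUNT + 1] = {
		[0] = AGG_ON, [3] = AGG_ON, [6] = AGG_ON,
	};

	/* TIDs 0, 1 and 6 use this queue; only 0 and 6 have aggs open */
	printf("agg tids: 0x%lx\n", queue_agg_tids(0x43, state));
	return 0;
}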
+
+static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
+				    unsigned long tfd_queue_mask, u8 ac)
+{
+	int queue = 0;
+	u8 ac_to_queue[IEEE80211_NUM_ACS];
+	int i;
+
+	lockdep_assert_held(&mvm->queue_info_lock);
+
+	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));
+
+	/* See what ACs the existing queues for this STA have */
+	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
+		/* Only DATA queues can be shared */
+		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
+		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
+			continue;
+
+		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
+	}
+
+	/*
+	 * The queue to share is chosen only from DATA queues as follows (in
+	 * descending priority):
+	 * 1. An AC_BE queue
+	 * 2. Same AC queue
+	 * 3. Highest AC queue that is lower than new AC
+	 * 4. Any existing AC (there always is at least 1 DATA queue)
+	 */
+
+	/* Priority 1: An AC_BE queue */
+	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
+		queue = ac_to_queue[IEEE80211_AC_BE];
+	/* Priority 2: Same AC queue */
+	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
+		queue = ac_to_queue[ac];
+	/* Priority 3a: If new AC is VO and VI exists - use VI */
+	else if (ac == IEEE80211_AC_VO &&
+		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
+		queue = ac_to_queue[IEEE80211_AC_VI];
+	/* Priority 3b: No BE, so the only AC lower than the new one is BK */
+	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
+		queue = ac_to_queue[IEEE80211_AC_BK];
+	/* Priority 4a: No BE nor BK - use VI if it exists */
+	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
+		queue = ac_to_queue[IEEE80211_AC_VI];
+	/* Priority 4b: No BE, BK nor VI - use VO if it exists */
+	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
+		queue = ac_to_queue[IEEE80211_AC_VO];
+
+	/* Make sure the queue found (or not) is legal */
+	if (!((queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE &&
+	       queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE) ||
+	      (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE &&
+	       queue <= IWL_MVM_DQA_MAX_DATA_QUEUE) ||
+	      (queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE))) {
+		IWL_ERR(mvm, "No DATA queues available to share\n");
+		queue = -ENOSPC;
+	}
+
+	return queue;
+}
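Aside: the if/else ladder above encodes the fallback order described in its comment. The sketch below mirrors that ladder outside the driver so the order is easy to test; pick_shared_queue() and INVAL_QUEUE are hypothetical names, and the "flipped" AC numbering (VO=0 .. BK=3) matches enum ieee80211_ac_numbers:

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK, NUM_ACS };
#define INVAL_QUEUE 0xff

/* Mirror of the ladder in iwl_mvm_get_shared_queue() above;
 * returns -1 where the driver would return -ENOSPC. */
static int pick_shared_queue(const unsigned char q[NUM_ACS], int ac)
{
	if (q[AC_BE] != INVAL_QUEUE)		/* 1: an AC_BE queue */
		return q[AC_BE];
	if (q[ac] != INVAL_QUEUE)		/* 2: same AC */
		return q[ac];
	if (ac == AC_VO && q[AC_VI] != INVAL_QUEUE)
		return q[AC_VI];		/* 3a: VO falls back to VI */
	if (q[AC_BK] != INVAL_QUEUE)		/* 3b: only lower AC is BK */
		return q[AC_BK];
	if (q[AC_VI] != INVAL_QUEUE)		/* 4a: no BE/BK, take VI */
		return q[AC_VI];
	if (q[AC_VO] != INVAL_QUEUE)		/* 4b: last resort, VO */
		return q[AC_VO];
	return -1;
}

int main(void)
{
	unsigned char q[NUM_ACS] = { INVAL_QUEUE, 12, INVAL_QUEUE, 9 };

	/* Adding a BK stream: no BE queue, but a BK queue (9) exists */
	printf("queue: %d\n", pick_shared_queue(q, AC_BK));
	return 0;
}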
+
+/*
+ * If a given queue has a higher AC than the TID stream that is being added
+ * to it, the queue needs to be redirected to the lower AC. This function
+ * does that in such a case; otherwise - if no redirection is required - it
+ * does nothing, unless the %force param is true.
+ */
+static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
+				      int ac, int ssn, unsigned int wdg_timeout,
+				      bool force)
+{
+	struct iwl_scd_txq_cfg_cmd cmd = {
+		.scd_queue = queue,
+		.enable = 0,
+	};
+	bool shared_queue;
+	unsigned long mq;
+	int ret;
+
+	/*
+	 * If the AC is lower than the current one - the FIFO needs to be
+	 * redirected to the lowest one of the streams in the queue. Check if
+	 * this is needed here.
+	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is
+	 * with value 3 and VO with value 0, so to check if ac X is lower
+	 * than ac Y we need to check if the numerical value of X is LARGER
+	 * than of Y.
+	 */
+	spin_lock_bh(&mvm->queue_info_lock);
+	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
+		spin_unlock_bh(&mvm->queue_info_lock);
+
+		IWL_DEBUG_TX_QUEUES(mvm,
+				    "No redirection needed on TXQ #%d\n",
+				    queue);
+		return 0;
+	}
+
+	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
+	mq = mvm->queue_info[queue].hw_queue_to_mac80211;
+	shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1);
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting shared TXQ #%d to FIFO #%d\n",
+			    queue, iwl_mvm_ac_to_tx_fifo[ac]);
+
+	/* Stop MAC queues and wait for this queue to empty */
+	iwl_mvm_stop_mac_queues(mvm, mq);
+	ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue));
+	if (ret) {
+		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
+			queue);
+		ret = -EIO;
+		goto out;
+	}
+
+	/* Before redirecting the queue we need to de-activate it */
+	iwl_trans_txq_disable(mvm->trans, queue, false);
+	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
+	if (ret)
+		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
+			ret);
+
+	/* Make sure the SCD wrptr is correctly set before reconfiguring */
+	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
+
+	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */
+
+	/* Redirect to lower AC */
+	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
+			     cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
+			     ssn);
+
+	/* Update AC marking of the queue */
+	spin_lock_bh(&mvm->queue_info_lock);
+	mvm->queue_info[queue].mac80211_ac = ac;
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	/*
+	 * Mark queue as shared in transport if shared.
+	 * Note this has to be done after queue enablement because enablement
+	 * can also set this value, and there is no indication there to shared
+	 * queues.
+	 */
+	if (shared_queue)
+		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
+
+out:
+	/* Continue using the MAC queues */
+	iwl_mvm_start_mac_queues(mvm, mq);
+
+	return ret;
+}
+
+static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
+				   struct ieee80211_sta *sta, u8 ac, int tid,
+				   struct ieee80211_hdr *hdr)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_trans_txq_scd_cfg cfg = {
+		.fifo = iwl_mvm_ac_to_tx_fifo[ac],
+		.sta_id = mvmsta->sta_id,
+		.tid = tid,
+		.frame_limit = IWL_FRAME_LIMIT,
+	};
+	unsigned int wdg_timeout =
+		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
+	u8 mac_queue = mvmsta->vif->hw_queue[ac];
+	int queue = -1;
+	bool using_inactive_queue = false;
+	unsigned long disable_agg_tids = 0;
+	enum iwl_mvm_agg_state queue_state;
+	bool shared_queue = false;
+	int ssn;
+	unsigned long tfd_queue_mask;
+	int ret;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	spin_lock_bh(&mvmsta->lock);
+	tfd_queue_mask = mvmsta->tfd_queue_msk;
+	spin_unlock_bh(&mvmsta->lock);
+
+	spin_lock_bh(&mvm->queue_info_lock);
+
+	/*
+	 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one
+	 * exists
+	 */
+	if (!ieee80211_is_data_qos(hdr->frame_control) ||
+	    ieee80211_is_qos_nullfunc(hdr->frame_control)) {
+		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+						IWL_MVM_DQA_MIN_MGMT_QUEUE,
+						IWL_MVM_DQA_MAX_MGMT_QUEUE);
+		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
+			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);
+
+		/* If no such queue is found, we'll use a DATA queue instead */
+	}
+
+	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
+	    (mvm->queue_info[mvmsta->reserved_queue].status ==
+	     IWL_MVM_QUEUE_RESERVED ||
+	     mvm->queue_info[mvmsta->reserved_queue].status ==
+	     IWL_MVM_QUEUE_INACTIVE)) {
+		queue = mvmsta->reserved_queue;
+		mvm->queue_info[queue].reserved = true;
+		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
+	}
+
+	if (queue < 0)
+		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+						IWL_MVM_DQA_MIN_DATA_QUEUE,
+						IWL_MVM_DQA_MAX_DATA_QUEUE);
+
+	/*
+	 * Check if this queue is already allocated but inactive.
+	 * In such a case, we'll need to first free this queue before enabling
+	 * it again, so we'll mark it as reserved to make sure no new traffic
+	 * arrives on it
+	 */
+	if (queue > 0 &&
+	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) {
+		mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
+		using_inactive_queue = true;
+		IWL_DEBUG_TX_QUEUES(mvm,
+				    "Re-assigning TXQ %d: sta_id=%d, tid=%d\n",
+				    queue, mvmsta->sta_id, tid);
+	}
+
+	/* No free queue - we'll have to share */
+	if (queue <= 0) {
+		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
+		if (queue > 0) {
+			shared_queue = true;
+			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
+		}
+	}
+
+	/*
+	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
+	 * to make sure no one else takes it.
+	 * This will allow avoiding re-acquiring the lock at the end of the
+	 * configuration. On error we'll mark it back as free.
+	 */
+	if ((queue > 0) && !shared_queue)
+		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
+
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	/* This shouldn't happen - out of queues */
+	if (WARN_ON(queue <= 0)) {
+		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
+			tid, cfg.sta_id);
+		return -ENOSPC;
+	}
+
+	/*
+	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
+	 * but for configuring the SCD to send A-MPDUs we need to mark the
+	 * queue as aggregatable.
+	 * Mark all DATA queues as allowing to be aggregated at some point
+	 */
+	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
+			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);
+
+	/*
+	 * If this queue was previously inactive (idle) - we need to free it
+	 * first
+	 */
+	if (using_inactive_queue) {
+		struct iwl_scd_txq_cfg_cmd cmd = {
+			.scd_queue = queue,
+			.enable = 0,
+		};
+		u8 ac;
+
+		disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
+
+		spin_lock_bh(&mvm->queue_info_lock);
+		ac = mvm->queue_info[queue].mac80211_ac;
+		cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
+		cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[ac];
+		spin_unlock_bh(&mvm->queue_info_lock);
+
+		/* Disable the queue */
+		iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids,
+					     true);
+		iwl_trans_txq_disable(mvm->trans, queue, false);
+		ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd),
+					   &cmd);
+		if (ret) {
+			IWL_ERR(mvm,
+				"Failed to free inactive queue %d (ret=%d)\n",
+				queue, ret);
+
+			/* Re-mark the inactive queue as inactive */
+			spin_lock_bh(&mvm->queue_info_lock);
+			mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE;
+			spin_unlock_bh(&mvm->queue_info_lock);
+
+			return ret;
+		}
+	}
+
+	IWL_DEBUG_TX_QUEUES(mvm,
+			    "Allocating %squeue #%d to sta %d on tid %d\n",
+			    shared_queue ? "shared " : "", queue,
+			    mvmsta->sta_id, tid);
+
+	if (shared_queue) {
+		/* Disable any open aggs on this queue */
+		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);
+
+		if (disable_agg_tids) {
+			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
+					    queue);
+			iwl_mvm_invalidate_sta_queue(mvm, queue,
+						     disable_agg_tids, false);
+		}
+	}
+
+	ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+	iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg,
+			   wdg_timeout);
+
+	/*
+	 * Mark queue as shared in transport if shared.
+	 * Note this has to be done after queue enablement because enablement
+	 * can also set this value, and there is no indication there to shared
+	 * queues.
+	 */
+	if (shared_queue)
+		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);
+
+	spin_lock_bh(&mvmsta->lock);
+	mvmsta->tid_data[tid].txq_id = queue;
+	mvmsta->tid_data[tid].is_tid_active = true;
+	mvmsta->tfd_queue_msk |= BIT(queue);
+	queue_state = mvmsta->tid_data[tid].state;
+
+	if (mvmsta->reserved_queue == queue)
+		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
+	spin_unlock_bh(&mvmsta->lock);
+
+	if (!shared_queue) {
+		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
+		if (ret)
+			goto out_err;
+
+		/* If we need to re-enable aggregations... */
+		if (queue_state == IWL_AGG_ON) {
+			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
+			if (ret)
+				goto out_err;
+		}
+	} else {
+		/* Redirect queue, if needed */
+		ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn,
+						 wdg_timeout, false);
+		if (ret)
+			goto out_err;
+	}
+
+	return 0;
+
+out_err:
+	iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0);
+
+	return ret;
+}
+
+static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
+{
+	if (tid == IWL_MAX_TID_COUNT)
+		return IEEE80211_AC_VO; /* MGMT */
+
+	return tid_to_mac80211_ac[tid];
+}
+
+static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm,
+				       struct ieee80211_sta *sta, int tid)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
+	struct sk_buff *skb;
+	struct ieee80211_hdr *hdr;
+	struct sk_buff_head deferred_tx;
+	u8 mac_queue;
+	bool no_queue = false; /* Marks if there is a problem with the queue */
+	u8 ac;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	skb = skb_peek(&tid_data->deferred_tx_frames);
+	if (!skb)
+		return;
+	hdr = (void *)skb->data;
+
+	ac = iwl_mvm_tid_to_ac_queue(tid);
+	mac_queue = IEEE80211_SKB_CB(skb)->hw_queue;
+
+	if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE &&
+	    iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) {
+		IWL_ERR(mvm,
+			"Can't alloc TXQ for sta %d tid %d - dropping frame\n",
+			mvmsta->sta_id, tid);
+
+		/*
+		 * Mark queue as problematic so later the deferred traffic is
+		 * freed, as we can do nothing with it
+		 */
+		no_queue = true;
+	}
+
+	__skb_queue_head_init(&deferred_tx);
+
+	/* Disable bottom-halves when entering TX path */
+	local_bh_disable();
+	spin_lock(&mvmsta->lock);
+	skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx);
+	spin_unlock(&mvmsta->lock);
+
+	while ((skb = __skb_dequeue(&deferred_tx)))
+		if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta))
+			ieee80211_free_txskb(mvm->hw, skb);
+	local_bh_enable();
+
+	/* Wake queue */
+	iwl_mvm_start_mac_queues(mvm, BIT(mac_queue));
+}
+
+void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
+{
+	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
+					   add_stream_wk);
+	struct ieee80211_sta *sta;
+	struct iwl_mvm_sta *mvmsta;
+	unsigned long deferred_tid_traffic;
+	int sta_id, tid;
+
+	/* Check inactivity of queues */
+	iwl_mvm_inactivity_check(mvm);
+
+	mutex_lock(&mvm->mutex);
+
+	/* Go over all stations with deferred traffic */
+	for_each_set_bit(sta_id, mvm->sta_deferred_frames,
+			 IWL_MVM_STATION_COUNT) {
+		clear_bit(sta_id, mvm->sta_deferred_frames);
+		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+						lockdep_is_held(&mvm->mutex));
+		if (IS_ERR_OR_NULL(sta))
+			continue;
+
+		mvmsta = iwl_mvm_sta_from_mac80211(sta);
+		deferred_tid_traffic = mvmsta->deferred_traffic_tid_map;
+
+		for_each_set_bit(tid, &deferred_tid_traffic,
+				 IWL_MAX_TID_COUNT + 1)
+			iwl_mvm_tx_deferred_stream(mvm, sta, tid);
+	}
+
+	mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
+				      struct ieee80211_sta *sta,
+				      enum nl80211_iftype vif_type)
+{
+	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+	int queue;
+
+	/*
+	 * Check for inactive queues, so we don't reach a situation where we
+	 * can't add a STA due to a shortage in queues that doesn't really
+	 * exist
+	 */
+	iwl_mvm_inactivity_check(mvm);
+
+	spin_lock_bh(&mvm->queue_info_lock);
+
+	/* Make sure we have free resources for this STA */
+	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
+	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount &&
+	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
+	     IWL_MVM_QUEUE_FREE))
+		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
+	else
+		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+						IWL_MVM_DQA_MIN_DATA_QUEUE,
+						IWL_MVM_DQA_MAX_DATA_QUEUE);
+	if (queue < 0) {
+		spin_unlock_bh(&mvm->queue_info_lock);
+		IWL_ERR(mvm, "No available queues for new station\n");
+		return -ENOSPC;
+	}
+	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;
+
+	spin_unlock_bh(&mvm->queue_info_lock);
+
+	mvmsta->reserved_queue = queue;
+
+	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
+			    queue, mvmsta->sta_id);
+
+	return 0;
+}
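Aside: iwl_mvm_tx_deferred_stream() above drains deferred frames with a classic splice pattern - move the whole pending list to a private list while holding the station lock, then transmit or free entries with the lock released. A reduced userspace sketch, with a singly linked list standing in for sk_buff_head and all names hypothetical:

#include <stddef.h>
#include <stdio.h>

struct frame {
	int id;
	struct frame *next;
};

struct frame_list {
	struct frame *head;
	/* a real implementation would hold a lock while splicing */
};

/* Move the whole pending list to a private one in O(1), like
 * skb_queue_splice_init(); afterwards new frames can be queued
 * without racing the drain loop below. */
static void splice_init(struct frame_list *src, struct frame_list *dst)
{
	dst->head = src->head;
	src->head = NULL;
}

int main(void)
{
	struct frame f2 = { 2, NULL }, f1 = { 1, &f2 };
	struct frame_list pending = { &f1 }, local = { NULL };
	struct frame *f;

	splice_init(&pending, &local);		/* done "under the lock" */

	for (f = local.head; f; f = f->next)	/* drained lock-free */
		printf("tx frame %d\n", f->id);
	return 0;
}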
+
 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 		    struct ieee80211_vif *vif,
 		    struct ieee80211_sta *sta)
@@ -309,23 +996,37 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
 	mvm_sta->tfd_queue_msk = 0;
 
-	/* allocate new queues for a TDLS station */
-	if (sta->tdls) {
+	/*
+	 * Allocate new queues for a TDLS station, unless we're in DQA mode,
+	 * in which case they'll be allocated dynamically
+	 */
+	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) {
 		ret = iwl_mvm_tdls_sta_init(mvm, sta);
 		if (ret)
 			return ret;
-	} else {
+	} else if (!iwl_mvm_is_dqa_supported(mvm)) {
 		for (i = 0; i < IEEE80211_NUM_ACS; i++)
 			if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE)
 				mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]);
 	}
 
 	/* for HW restart - reset everything but the sequence number */
-	for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
 		u16 seq = mvm_sta->tid_data[i].seq_number;
 		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
 		mvm_sta->tid_data[i].seq_number = seq;
+
+		if (!iwl_mvm_is_dqa_supported(mvm))
+			continue;
+
+		/*
+		 * Mark all queues for this STA as unallocated and defer TX
+		 * frames until the queue is allocated
+		 */
+		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+		skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames);
 	}
+	mvm_sta->deferred_traffic_tid_map = 0;
 	mvm_sta->agg_tids = 0;
 
 	if (iwl_mvm_has_new_rx_api(mvm) &&
@@ -338,7 +1039,14 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 		mvm_sta->dup_data = dup_data;
 	}
 
-	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false);
+	if (iwl_mvm_is_dqa_supported(mvm)) {
+		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
+						 ieee80211_vif_type_p2p(vif));
+		if (ret)
+			goto err;
+	}
+
+	ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0);
 	if (ret)
 		goto err;
 
@@ -356,7 +1064,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
 	return 0;
 
 err:
-	iwl_mvm_tdls_sta_deinit(mvm, sta);
+	if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
+		iwl_mvm_tdls_sta_deinit(mvm, sta);
 	return ret;
 }
 
@@ -364,7 +1073,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
 		       struct ieee80211_vif *vif,
 		       struct ieee80211_sta *sta)
 {
-	return iwl_mvm_sta_send_to_fw(mvm, sta, true);
+	return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0);
 }
 
 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
@@ -509,6 +1218,26 @@ void iwl_mvm_sta_drained_wk(struct work_struct *wk)
 	mutex_unlock(&mvm->mutex);
 }
 
+static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
+				       struct ieee80211_vif *vif,
+				       struct iwl_mvm_sta *mvm_sta)
+{
+	int ac;
+	int i;
+
+	lockdep_assert_held(&mvm->mutex);
+
+	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
+		if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE)
+			continue;
+
+		ac = iwl_mvm_tid_to_ac_queue(i);
+		iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id,
+				    vif->hw_queue[ac], i, 0);
+		mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE;
+	}
+}
+
 int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 		   struct ieee80211_vif *vif,
 		   struct ieee80211_sta *sta)
@@ -522,8 +1251,9 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 	if (iwl_mvm_has_new_rx_api(mvm))
 		kfree(mvm_sta->dup_data);
 
-	if (vif->type == NL80211_IFTYPE_STATION &&
-	    mvmvif->ap_sta_id == mvm_sta->sta_id) {
+	if ((vif->type == NL80211_IFTYPE_STATION &&
+	     mvmvif->ap_sta_id == mvm_sta->sta_id) ||
+	    iwl_mvm_is_dqa_supported(mvm)) {
 		ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
 		if (ret)
 			return ret;
@@ -537,16 +1267,45 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 			return ret;
 		ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
 
-	/* if we are associated - we can't remove the AP STA now */
-	if (vif->bss_conf.assoc)
-		return ret;
+		/* If DQA is supported - the queues can be disabled now */
+		if (iwl_mvm_is_dqa_supported(mvm)) {
+			u8 reserved_txq = mvm_sta->reserved_queue;
+			enum iwl_mvm_queue_status *status;
+
+			iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
+
+			/*
+			 * If no traffic has gone through the reserved TXQ - it
+			 * is still marked as IWL_MVM_QUEUE_RESERVED, and
+			 * should be manually marked as free again
+			 */
+			spin_lock_bh(&mvm->queue_info_lock);
+			status = &mvm->queue_info[reserved_txq].status;
+			if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
+				 (*status != IWL_MVM_QUEUE_FREE),
+				 "sta_id %d reserved txq %d status %d",
+				 mvm_sta->sta_id, reserved_txq, *status)) {
+				spin_unlock_bh(&mvm->queue_info_lock);
+				return -EINVAL;
+			}
 
-	/* unassoc - go ahead - remove the AP STA now */
-	mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+			*status = IWL_MVM_QUEUE_FREE;
+			spin_unlock_bh(&mvm->queue_info_lock);
+		}
+
+		if (vif->type == NL80211_IFTYPE_STATION &&
+		    mvmvif->ap_sta_id == mvm_sta->sta_id) {
+			/* if associated - we can't remove the AP STA now */
+			if (vif->bss_conf.assoc)
+				return ret;
 
-	/* clear d0i3_ap_sta_id if no longer relevant */
-	if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
-		mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+			/* unassoc - go ahead - remove the AP STA now */
+			mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+
+			/* clear d0i3_ap_sta_id if no longer relevant */
+			if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
+				mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+		}
 	}
 
 	/*
@@ -584,7 +1343,7 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
 	} else {
 		spin_unlock_bh(&mvm_sta->lock);
 
-		if (sta->tdls)
+		if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls)
 			iwl_mvm_tdls_sta_deinit(mvm, sta);
 
 		ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
@@ -682,8 +1441,9 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
 	lockdep_assert_held(&mvm->mutex);
 
 	/* Map Aux queue to fifo - needs to happen before adding Aux station */
-	iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
-			      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
+	if (!iwl_mvm_is_dqa_supported(mvm))
+		iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue,
+				      IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout);
 
 	/* Allocate aux station and assign to it the aux queue */
 	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
@@ -691,6 +1451,19 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
 	if (ret)
 		return ret;
 
+	if (iwl_mvm_is_dqa_supported(mvm)) {
+		struct iwl_trans_txq_scd_cfg cfg = {
+			.fifo = IWL_MVM_TX_FIFO_MCAST,
+			.sta_id = mvm->aux_sta.sta_id,
+			.tid = IWL_MAX_TID_COUNT,
+			.aggregate = false,
+			.frame_limit = IWL_FRAME_LIMIT,
+		};
+
+		iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg,
+				   wdg_timeout);
+	}
+
 	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
 					 MAC_INDEX_AUX, 0);
 
@@ -750,6 +1523,33 @@ int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
 	lockdep_assert_held(&mvm->mutex);
 
+	if (iwl_mvm_is_dqa_supported(mvm)) {
+		struct iwl_trans_txq_scd_cfg cfg = {
+			.fifo = IWL_MVM_TX_FIFO_VO,
+			.sta_id = mvmvif->bcast_sta.sta_id,
+			.tid = IWL_MAX_TID_COUNT,
+			.aggregate = false,
+			.frame_limit = IWL_FRAME_LIMIT,
+		};
+		unsigned int wdg_timeout =
+			iwl_mvm_get_wd_timeout(mvm, vif, false, false);
+		int queue;
+
+		if ((vif->type == NL80211_IFTYPE_AP) &&
+		    (mvmvif->bcast_sta.tfd_queue_msk &
+		     BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE)))
+			queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE;
+		else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) &&
+			 (mvmvif->bcast_sta.tfd_queue_msk &
+			  BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE)))
+			queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE;
+		else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
+			return -EINVAL;
+
+		iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg,
+				   wdg_timeout);
+	}
+
 	if (vif->type == NL80211_IFTYPE_ADHOC)
 		baddr = vif->bss_conf.bssid;
 
@@ -778,20 +1578,28 @@ int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 {
 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
-	u32 qmask;
+	u32 qmask = 0;
 
 	lockdep_assert_held(&mvm->mutex);
 
-	qmask = iwl_mvm_mac_get_queues_mask(vif);
+	if (!iwl_mvm_is_dqa_supported(mvm))
+		qmask = iwl_mvm_mac_get_queues_mask(vif);
 
-	/*
-	 * The firmware defines the TFD queue mask to only be relevant
-	 * for *unicast* queues, so the multicast (CAB) queue shouldn't
-	 * be included.
-	 */
-	if (vif->type == NL80211_IFTYPE_AP)
+	if (vif->type == NL80211_IFTYPE_AP) {
+		/*
+		 * The firmware defines the TFD queue mask to only be relevant
+		 * for *unicast* queues, so the multicast (CAB) queue shouldn't
+		 * be included.
+		 */
 		qmask &= ~BIT(vif->cab_queue);
+
+		if (iwl_mvm_is_dqa_supported(mvm))
+			qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE);
+	} else if (iwl_mvm_is_dqa_supported(mvm) &&
+		   vif->type == NL80211_IFTYPE_P2P_DEVICE) {
+		qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE);
+	}
+
 	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask,
 					ieee80211_vif_type_p2p(vif));
 }
@@ -849,11 +1657,92 @@ int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
 
 #define IWL_MAX_RX_BA_SESSIONS 16
 
+static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
+{
+	struct iwl_mvm_delba_notif notif = {
+		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
+		.metadata.sync = 1,
+		.delba.baid = baid,
+	};
+	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
+};
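Aside: the per-RX-queue reorder buffers handled by the functions below live in a single allocation made in iwl_mvm_sta_rx_agg() further down - a header plus a trailing array sized by the number of RX queues. A small sketch of that flexible-array sizing, using reduced, hypothetical structures in place of the driver's iwl_mvm_baid_data:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical, reduced versions of the driver's structures */
struct reorder_buffer {
	unsigned short head_sn;
	unsigned char buf_size;
};

struct baid_data {
	unsigned char baid;
	unsigned char tid;
	int num_queues;
	struct reorder_buffer reorder_buf[]; /* one entry per RX queue */
};

int main(void)
{
	int num_rx_queues = 4;
	int i;

	/* One zeroed allocation for the header plus all per-queue buffers,
	 * mirroring: kzalloc(sizeof(*baid_data) +
	 *                    num_rx_queues * sizeof(baid_data->reorder_buf[0])) */
	struct baid_data *data =
		calloc(1, sizeof(*data) +
			  num_rx_queues * sizeof(data->reorder_buf[0]));
	if (!data)
		return 1;

	data->num_queues = num_rx_queues;
	for (i = 0; i < num_rx_queues; i++)
		data->reorder_buf[i].buf_size = 64;

	printf("total size: %zu bytes\n",
	       sizeof(*data) + num_rx_queues * sizeof(data->reorder_buf[0]));
	free(data);
	return 0;
}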
+
+static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
+				 struct iwl_mvm_baid_data *data)
+{
+	int i;
+
+	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
+
+	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
+		int j;
+		struct iwl_mvm_reorder_buffer *reorder_buf =
+			&data->reorder_buf[i];
+
+		spin_lock_bh(&reorder_buf->lock);
+		if (likely(!reorder_buf->num_stored)) {
+			spin_unlock_bh(&reorder_buf->lock);
+			continue;
+		}
+
+		/*
+		 * This shouldn't happen in regular DELBA since the internal
+		 * delBA notification should trigger a release of all frames
+		 * in the reorder buffer.
+		 */
+		WARN_ON(1);
+
+		for (j = 0; j < reorder_buf->buf_size; j++)
+			__skb_queue_purge(&reorder_buf->entries[j]);
+		/*
+		 * Prevent timer re-arm. This prevents a very far-fetched case
+		 * where we timed out on the notification. There may be prior
+		 * RX frames pending in the RX queue before the notification
+		 * that might get processed between now and the actual deletion
+		 * and we would re-arm the timer although we are deleting the
+		 * reorder buffer.
+		 */
+		reorder_buf->removed = true;
+		spin_unlock_bh(&reorder_buf->lock);
+		del_timer_sync(&reorder_buf->reorder_timer);
+	}
+}
+
+static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
+					u32 sta_id,
+					struct iwl_mvm_baid_data *data,
+					u16 ssn, u8 buf_size)
+{
+	int i;
+
+	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
+		struct iwl_mvm_reorder_buffer *reorder_buf =
+			&data->reorder_buf[i];
+		int j;
+
+		reorder_buf->num_stored = 0;
+		reorder_buf->head_sn = ssn;
+		reorder_buf->buf_size = buf_size;
+		/* rx reorder timer */
+		reorder_buf->reorder_timer.function =
+			iwl_mvm_reorder_timer_expired;
+		reorder_buf->reorder_timer.data = (unsigned long)reorder_buf;
+		init_timer(&reorder_buf->reorder_timer);
+		spin_lock_init(&reorder_buf->lock);
+		reorder_buf->mvm = mvm;
+		reorder_buf->queue = i;
+		reorder_buf->sta_id = sta_id;
+		for (j = 0; j < reorder_buf->buf_size; j++)
+			__skb_queue_head_init(&reorder_buf->entries[j]);
+	}
+}
+
 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-		       int tid, u16 ssn, bool start, u8 buf_size)
+		       int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
 {
 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_add_sta_cmd cmd = {};
+	struct iwl_mvm_baid_data *baid_data = NULL;
 	int ret;
 	u32 status;
 
@@ -864,6 +1753,19 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 		return -ENOSPC;
 	}
 
+	if (iwl_mvm_has_new_rx_api(mvm) && start) {
+		/*
+		 * Allocate here so if allocation fails we can bail out early
+		 * before starting the BA session in the firmware
+		 */
+		baid_data = kzalloc(sizeof(*baid_data) +
+				    mvm->trans->num_rx_queues *
+				    sizeof(baid_data->reorder_buf[0]),
+				    GFP_KERNEL);
+		if (!baid_data)
+			return -ENOMEM;
+	}
+
 	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
 	cmd.sta_id = mvm_sta->sta_id;
 	cmd.add_modify = STA_MODE_MODIFY;
@@ -882,12 +1784,12 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 				  iwl_mvm_add_sta_cmd_size(mvm),
 				  &cmd, &status);
 	if (ret)
-		return ret;
+		goto out_free;
 
 	switch (status & IWL_ADD_STA_STATUS_MASK) {
 	case ADD_STA_SUCCESS:
-		IWL_DEBUG_INFO(mvm, "RX BA Session %sed in fw\n",
-			       start ? "start" : "stopp");
+		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
+			     start ? "start" : "stopp");
 		break;
 	case ADD_STA_IMMEDIATE_BA_FAILURE:
 		IWL_WARN(mvm, "RX BA Session refused by fw\n");
@@ -900,19 +1802,84 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 		break;
 	}
 
-	if (!ret) {
-		if (start)
-			mvm->rx_ba_sessions++;
-		else if (mvm->rx_ba_sessions > 0)
+	if (ret)
+		goto out_free;
+
+	if (start) {
+		u8 baid;
+
+		mvm->rx_ba_sessions++;
+
+		if (!iwl_mvm_has_new_rx_api(mvm))
+			return 0;
+
+		if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) {
+			ret = -EINVAL;
+			goto out_free;
+		}
+		baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >>
+			    IWL_ADD_STA_BAID_SHIFT);
+		baid_data->baid = baid;
+		baid_data->timeout = timeout;
+		baid_data->last_rx = jiffies;
+		init_timer(&baid_data->session_timer);
+		baid_data->session_timer.function =
+			iwl_mvm_rx_agg_session_expired;
+		baid_data->session_timer.data =
+			(unsigned long)&mvm->baid_map[baid];
+		baid_data->mvm = mvm;
+		baid_data->tid = tid;
+		baid_data->sta_id = mvm_sta->sta_id;
+
+		mvm_sta->tid_to_baid[tid] = baid;
+		if (timeout)
+			mod_timer(&baid_data->session_timer,
+				  TU_TO_EXP_TIME(timeout * 2));
+
+		iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id,
+					    baid_data, ssn, buf_size);
+		/*
+		 * protect the BA data with RCU to cover a case where our
+		 * internal RX sync mechanism will timeout (not that it's
+		 * supposed to happen) and we will free the session data while
+		 * RX is being processed in parallel
+		 */
+		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
+			     mvm_sta->sta_id, tid, baid);
+		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
+		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
+	} else {
+		u8 baid = mvm_sta->tid_to_baid[tid];
+
+		if (mvm->rx_ba_sessions > 0)
 			/* check that restart flow didn't zero the counter */
 			mvm->rx_ba_sessions--;
+		if (!iwl_mvm_has_new_rx_api(mvm))
+			return 0;
+
+		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
+			return -EINVAL;
+
+		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
+		if (WARN_ON(!baid_data))
+			return -EINVAL;
+
+		/* synchronize all rx queues so we can safely delete */
+		iwl_mvm_free_reorder(mvm, baid_data);
+		del_timer_sync(&baid_data->session_timer);
+		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
+		kfree_rcu(baid_data, rcu_head);
+		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
 	}
+	return 0;
 
+out_free:
+	kfree(baid_data);
 	return ret;
 }
 
-static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
-			      int tid, u8 queue, bool start)
+int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
+		       int tid, u8 queue, bool start)
 {
 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
 	struct iwl_mvm_add_sta_cmd cmd = {};
@@ -925,7 +1892,9 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
 		mvm_sta->tfd_queue_msk |= BIT(queue);
 		mvm_sta->tid_disable_agg &= ~BIT(tid);
 	} else {
-		mvm_sta->tfd_queue_msk &= ~BIT(queue);
+		/* In DQA-mode the queue isn't removed on agg termination */
+		if (!iwl_mvm_is_dqa_supported(mvm))
+			mvm_sta->tfd_queue_msk &= ~BIT(queue);
 		mvm_sta->tid_disable_agg |= BIT(tid);
 	}
 
@@ -965,6 +1934,7 @@ const u8 tid_to_mac80211_ac[] = {
 	IEEE80211_AC_VI,
 	IEEE80211_AC_VO,
 	IEEE80211_AC_VO,
+	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
 };
 
 static const u8 tid_to_ucode_ac[] = {
@@ -1008,17 +1978,36 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	spin_lock_bh(&mvm->queue_info_lock);
 
-	txq_id = iwl_mvm_find_free_queue(mvm, mvm->first_agg_queue,
-					 mvm->last_agg_queue);
-	if (txq_id < 0) {
-		ret = txq_id;
-		spin_unlock_bh(&mvm->queue_info_lock);
-		IWL_ERR(mvm, "Failed to allocate agg queue\n");
-		goto release_locks;
+	/*
+	 * Note the possible cases:
+	 * 1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
+	 * 2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
+	 *    one and mark it as reserved
+	 * 3. In DQA mode, but no traffic yet on this TID: same treatment as
+	 *    in non-DQA mode, since the TXQ hasn't yet been allocated
+	 */
+	txq_id = mvmsta->tid_data[tid].txq_id;
+	if (!iwl_mvm_is_dqa_supported(mvm) ||
+	    mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
+		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
+						 mvm->first_agg_queue,
+						 mvm->last_agg_queue);
+		if (txq_id < 0) {
+			ret = txq_id;
+			spin_unlock_bh(&mvm->queue_info_lock);
+			IWL_ERR(mvm, "Failed to allocate agg queue\n");
+			goto release_locks;
+		}
+
+		/* TXQ hasn't yet been enabled, so mark it only as reserved */
+		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
 	}
-	mvm->queue_info[txq_id].setup_reserved = true;
 	spin_unlock_bh(&mvm->queue_info_lock);
 
+	IWL_DEBUG_TX_QUEUES(mvm,
+			    "AGG for tid %d will be on queue #%d\n",
+			    tid, txq_id);
+
 	tid_data = &mvmsta->tid_data[tid];
 	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
 	tid_data->txq_id = txq_id;
@@ -1053,6 +2042,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	unsigned int wdg_timeout =
 		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
 	int queue, ret;
+	bool alloc_queue = true;
 	u16 ssn;
 
 	struct iwl_trans_txq_scd_cfg cfg = {
@@ -1078,8 +2068,46 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
 
-	iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[tid_to_mac80211_ac[tid]],
-			   ssn, &cfg, wdg_timeout);
+	/* In DQA mode, the existing queue might need to be reconfigured */
+	if (iwl_mvm_is_dqa_supported(mvm)) {
+		spin_lock_bh(&mvm->queue_info_lock);
+		/* Maybe there is no need to even alloc a queue... */
+		if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
+			alloc_queue = false;
+		spin_unlock_bh(&mvm->queue_info_lock);
+
+		/*
+		 * Only reconfig the SCD for the queue if the window size has
+		 * changed from current (become smaller)
+		 */
+		if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
+			/*
+			 * If reconfiguring an existing queue, it first must be
+			 * drained
+			 */
+			ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
+							    BIT(queue));
+			if (ret) {
+				IWL_ERR(mvm,
+					"Error draining queue before reconfig\n");
+				return ret;
+			}
+
+			ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
+						   mvmsta->sta_id, tid,
+						   buf_size, ssn);
+			if (ret) {
+				IWL_ERR(mvm,
+					"Error reconfiguring TXQ #%d\n", queue);
+				return ret;
+			}
+		}
+	}
+
+	if (alloc_queue)
+		iwl_mvm_enable_txq(mvm, queue,
+				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
+				   &cfg, wdg_timeout);
 
 	ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
 	if (ret)
@@ -1087,7 +2115,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	/* No need to mark as reserved */
 	spin_lock_bh(&mvm->queue_info_lock);
-	mvm->queue_info[queue].setup_reserved = false;
+	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
 	spin_unlock_bh(&mvm->queue_info_lock);
 
 	/*
@@ -1134,9 +2162,16 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 	mvmsta->agg_tids &= ~BIT(tid);
 
-	/* No need to mark as reserved anymore */
 	spin_lock_bh(&mvm->queue_info_lock);
-	mvm->queue_info[txq_id].setup_reserved = false;
+	/*
+	 * The TXQ is marked as reserved only if no traffic came through yet.
+	 * This means no traffic has been sent on this TID (agg'd or not), so
+	 * we no longer have use for the queue. Since it hasn't even been
+	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
+	 * free.
+	 */
+	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
+		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
 	spin_unlock_bh(&mvm->queue_info_lock);
 
 	switch (tid_data->state) {
@@ -1162,9 +2197,11 @@ int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
 
-		iwl_mvm_disable_txq(mvm, txq_id,
-				    vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
-				    0);
+		if (!iwl_mvm_is_dqa_supported(mvm)) {
+			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
+
+			iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
+		}
 		return 0;
 	case IWL_AGG_STARTING:
 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
@@ -1215,9 +2252,16 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 	mvmsta->agg_tids &= ~BIT(tid);
 	spin_unlock_bh(&mvmsta->lock);
 
-	/* No need to mark as reserved */
 	spin_lock_bh(&mvm->queue_info_lock);
-	mvm->queue_info[txq_id].setup_reserved = false;
+	/*
+	 * The TXQ is marked as reserved only if no traffic came through yet.
+	 * This means no traffic has been sent on this TID (agg'd or not), so
+	 * we no longer have use for the queue. Since it hasn't even been
+	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
+	 * free.
+	 */
+	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
+		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
 	spin_unlock_bh(&mvm->queue_info_lock);
 
 	if (old_state >= IWL_AGG_ON) {
@@ -1230,9 +2274,12 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
 
 		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
 
-		iwl_mvm_disable_txq(mvm, tid_data->txq_id,
-				    vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
-				    0);
+		if (!iwl_mvm_is_dqa_supported(mvm)) {
+			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];
+
+			iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
+					    tid, 0);
+		}
 	}
 
 	return 0;
@@ -1285,6 +2332,7 @@ static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
 	sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
 				    lockdep_is_held(&mvm->mutex));
 
+	/*
 	 * It is possible that the 'sta' parameter is NULL,
 	 * for example when a GTK is removed - the sta_id will then
@@ -1337,6 +2385,13 @@ static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
 		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
 		memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
 		break;
+	case WLAN_CIPHER_SUITE_GCMP_256:
+		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
+		/* fall through */
+	case WLAN_CIPHER_SUITE_GCMP:
+		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
+		memcpy(cmd.key, keyconf->key, keyconf->keylen);
+		break;
 	default:
 		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
 		memcpy(cmd.key, keyconf->key, keyconf->keylen);
@@ -1391,6 +2446,14 @@ static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
 	struct ieee80211_key_seq seq;
 	const u8 *pn;
 
+	switch (keyconf->cipher) {
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
+		break;
+	default:
+		return -EINVAL;
+	}
+
 	memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
 	ieee80211_get_key_rx_seq(keyconf, 0, &seq);
 	pn = seq.aes_cmac.pn;
@@ -1457,6 +2520,8 @@ static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
 	case WLAN_CIPHER_SUITE_CCMP:
 	case WLAN_CIPHER_SUITE_WEP40:
 	case WLAN_CIPHER_SUITE_WEP104:
+	case WLAN_CIPHER_SUITE_GCMP:
+	case WLAN_CIPHER_SUITE_GCMP_256:
 		ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
 					   0, NULL, 0, key_offset);
 		break;
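Aside: the GCMP_256 case added to iwl_mvm_send_sta_key() above relies on a deliberate switch fall-through - the 256-bit cipher ORs in the 32-byte-key flag, then falls into the GCMP case for the common flag and key copy. A standalone sketch of that flag-accumulation idiom; the flag values here are made up, not the firmware's STA_KEY_FLG_* encoding:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the driver's STA_KEY_FLG_* bits */
#define KEY_FLG_GCMP        0x0001
#define KEY_FLG_KEY_32BYTES 0x0100

enum cipher { CIPHER_GCMP, CIPHER_GCMP_256 };

static uint16_t cipher_to_key_flags(enum cipher c)
{
	uint16_t key_flags = 0;

	switch (c) {
	case CIPHER_GCMP_256:
		key_flags |= KEY_FLG_KEY_32BYTES;
		/* fall through: GCMP-256 is GCMP plus a longer key */
	case CIPHER_GCMP:
		key_flags |= KEY_FLG_GCMP;
		break;
	}
	return key_flags;
}

int main(void)
{
	printf("GCMP:     0x%04x\n", (unsigned)cipher_to_key_flags(CIPHER_GCMP));
	printf("GCMP-256: 0x%04x\n", (unsigned)cipher_to_key_flags(CIPHER_GCMP_256));
	return 0;
}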