Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/pcie/rx.c')
-rw-r--r-- | drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 220
1 file changed, 137 insertions(+), 83 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 4be3c35af..5c36e6d00 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -161,20 +161,21 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
 	return cpu_to_le32((u32)(dma_addr >> 8));
 }
 
-static void iwl_pcie_write_prph_64(struct iwl_trans *trans, u64 ofs, u64 val)
-{
-	iwl_write_prph(trans, ofs, val & 0xffffffff);
-	iwl_write_prph(trans, ofs + 4, val >> 32);
-}
-
 /*
  * iwl_pcie_rx_stop - stops the Rx DMA
  */
 int iwl_pcie_rx_stop(struct iwl_trans *trans)
 {
-	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-	return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
-				   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+	if (trans->cfg->mq_rx_supported) {
+		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
+		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
+					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
+	} else {
+		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
+					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
+					   1000);
+	}
 }
@@ -208,8 +209,8 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
 	rxq->write_actual = round_down(rxq->write, 8);
 	if (trans->cfg->mq_rx_supported)
-		iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(rxq->id),
-			       rxq->write_actual);
+		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
+			    rxq->write_actual);
 	else
 		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
 }
@@ -232,10 +233,10 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
 }
 
 /*
- * iwl_pcie_rxq_mq_restock - restock implementation for multi-queue rx
+ * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
  */
-static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
-				    struct iwl_rxq *rxq)
+static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
+				  struct iwl_rxq *rxq)
 {
 	struct iwl_rx_mem_buffer *rxb;
@@ -258,7 +259,7 @@ static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
 		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
 				       list);
 		list_del(&rxb->list);
-
+		rxb->invalid = false;
 		/* 12 first bits are expected to be empty */
 		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
 		/* Point to Rx buffer via next RBD in circular buffer */
@@ -280,10 +281,10 @@ static void iwl_pcie_rxq_mq_restock(struct iwl_trans *trans,
 }
 
 /*
- * iwl_pcie_rxq_sq_restock - restock implementation for single queue rx
+ * iwl_pcie_rxsq_restock - restock implementation for single queue rx
  */
-static void iwl_pcie_rxq_sq_restock(struct iwl_trans *trans,
-				    struct iwl_rxq *rxq)
+static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
+				  struct iwl_rxq *rxq)
 {
 	struct iwl_rx_mem_buffer *rxb;
@@ -309,6 +310,7 @@ static void iwl_pcie_rxq_sq_restock(struct iwl_trans *trans,
 		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
 				       list);
 		list_del(&rxb->list);
+		rxb->invalid = false;
 
 		/* Point to Rx buffer via next RBD in circular buffer */
 		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
@@ -342,9 +344,9 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
 {
 	if (trans->cfg->mq_rx_supported)
-		iwl_pcie_rxq_mq_restock(trans, rxq);
+		iwl_pcie_rxmq_restock(trans, rxq);
 	else
-		iwl_pcie_rxq_sq_restock(trans, rxq);
+		iwl_pcie_rxsq_restock(trans, rxq);
 }
 
 /*
@@ -694,6 +696,7 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 rb_size;
+	unsigned long flags;
 	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
 
 	switch (trans_pcie->rx_buf_size) {
@@ -711,23 +714,26 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
 		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
 	}
 
+	if (!iwl_trans_grab_nic_access(trans, &flags))
+		return;
+
 	/* Stop Rx DMA */
-	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
 	/* reset and flush pointers */
-	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
-	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
-	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
+	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
+	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
+	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
 
 	/* Reset driver's Rx queue write index */
-	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
 
 	/* Tell device where to find RBD circular buffer in DRAM */
-	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
-			   (u32)(rxq->bd_dma >> 8));
+	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+		    (u32)(rxq->bd_dma >> 8));
 
 	/* Tell device where in DRAM to update its Rx status */
-	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
-			   rxq->rb_stts_dma >> 4);
+	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+		    rxq->rb_stts_dma >> 4);
 
 	/* Enable Rx DMA
 	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
@@ -737,13 +743,15 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
 	 * RB timeout 0x10
 	 * 256 RBDs
 	 */
-	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
-			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
-			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
-			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
-			   rb_size|
-			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
-			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+		    rb_size |
+		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+	iwl_trans_release_nic_access(trans, &flags);
 
 	/* Set interrupt coalescing timer to default (2048 usecs) */
 	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
@@ -753,10 +761,28 @@
 		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
 }
 
+void iwl_pcie_enable_rx_wake(struct iwl_trans *trans, bool enable)
+{
+	/*
+	 * Turn on the chicken-bits that cause MAC wakeup for RX-related
+	 * values.
+	 * This costs some power, but needed for W/A 9000 integrated A-step
+	 * bug where shadow registers are not in the retention list and their
+	 * value is lost when NIC powers down
+	 */
+	if (trans->cfg->integrated) {
+		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL,
+			    CSR_MAC_SHADOW_REG_CTRL_RX_WAKE);
+		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTL2,
+			    CSR_MAC_SHADOW_REG_CTL2_RX_WAKE);
+	}
+}
+
 static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 rb_size, enabled = 0;
+	unsigned long flags;
 	int i;
 
 	switch (trans_pcie->rx_buf_size) {
@@ -774,60 +800,70 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
 		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
 	}
 
+	if (!iwl_trans_grab_nic_access(trans, &flags))
+		return;
+
 	/* Stop Rx DMA */
-	iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
+	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
 	/* disable free amd used rx queue operation */
-	iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, 0);
+	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
 
 	for (i = 0; i < trans->num_rx_queues; i++) {
 		/* Tell device where to find RBD free table in DRAM */
-		iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i),
-				       (u64)(trans_pcie->rxq[i].bd_dma));
+		iwl_write_prph64_no_grab(trans,
+					 RFH_Q_FRBDCB_BA_LSB(i),
+					 trans_pcie->rxq[i].bd_dma);
 		/* Tell device where to find RBD used table in DRAM */
-		iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i),
-				       (u64)(trans_pcie->rxq[i].used_bd_dma));
+		iwl_write_prph64_no_grab(trans,
					 RFH_Q_URBDCB_BA_LSB(i),
+					 trans_pcie->rxq[i].used_bd_dma);
 		/* Tell device where in DRAM to update its Rx status */
-		iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i),
-				       trans_pcie->rxq[i].rb_stts_dma);
+		iwl_write_prph64_no_grab(trans,
+					 RFH_Q_URBD_STTS_WPTR_LSB(i),
+					 trans_pcie->rxq[i].rb_stts_dma);
 		/* Reset device indice tables */
-		iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0);
-		iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0);
-		iwl_write_prph(trans, RFH_Q_URBDCB_WIDX(i), 0);
+		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
+		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
+		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
 
 		enabled |= BIT(i) | BIT(i + 16);
 	}
 
-	/* restock default queue */
-	iwl_pcie_rxq_mq_restock(trans, &trans_pcie->rxq[0]);
-
 	/*
 	 * Enable Rx DMA
-	 * Single frame mode
 	 * Rx buffer size 4 or 8k or 12k
 	 * Min RB size 4 or 8
 	 * Drop frames that exceed RB size
 	 * 512 RBDs
 	 */
-	iwl_write_prph(trans, RFH_RXF_DMA_CFG,
-		       RFH_DMA_EN_ENABLE_VAL |
-		       rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
-		       RFH_RXF_DMA_MIN_RB_4_8 |
-		       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
-		       RFH_RXF_DMA_RBDCB_SIZE_512);
+	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
+			       RFH_DMA_EN_ENABLE_VAL | rb_size |
+			       RFH_RXF_DMA_MIN_RB_4_8 |
+			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
+			       RFH_RXF_DMA_RBDCB_SIZE_512);
 
 	/*
 	 * Activate DMA snooping.
-	 * Set RX DMA chunk size to 64B
+	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
 	 * Default queue is 0
 	 */
-	iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
-		       (DEFAULT_RXQ_NUM << RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
-		       RFH_GEN_CFG_SERVICE_DMA_SNOOP);
+	iwl_write_prph_no_grab(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
+			       (DEFAULT_RXQ_NUM <<
+				RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
+			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
+			       (trans->cfg->integrated ?
+				RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
+				RFH_GEN_CFG_RB_CHUNK_SIZE_128) <<
+			       RFH_GEN_CFG_RB_CHUNK_SIZE_POS);
 	/* Enable the relevant rx queues */
-	iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled);
+	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
+
+	iwl_trans_release_nic_access(trans, &flags);
 
 	/* Set interrupt coalescing timer to default (2048 usecs) */
 	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
+	iwl_pcie_enable_rx_wake(trans, true);
 }
 
 static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
@@ -908,6 +944,8 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 	allocator_pool_size = trans->num_rx_queues *
 		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
 	num_alloc = queue_size + allocator_pool_size;
+	BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
+		     ARRAY_SIZE(trans_pcie->rx_pool));
 	for (i = 0; i < num_alloc; i++) {
 		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
@@ -916,16 +954,18 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 		else
 			list_add(&rxb->list, &def_rxq->rx_used);
 		trans_pcie->global_table[i] = rxb;
-		rxb->vid = (u16)i;
+		rxb->vid = (u16)(i + 1);
+		rxb->invalid = true;
 	}
 
 	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
-	if (trans->cfg->mq_rx_supported) {
+
+	if (trans->cfg->mq_rx_supported)
 		iwl_pcie_rx_mq_hw_init(trans);
-	} else {
-		iwl_pcie_rxq_sq_restock(trans, def_rxq);
+	else
 		iwl_pcie_rx_hw_init(trans, def_rxq);
-	}
+
+	iwl_pcie_rxq_restock(trans, def_rxq);
 
 	spin_lock(&def_rxq->lock);
 	iwl_pcie_rxq_inc_wr_ptr(trans, def_rxq);
@@ -1064,6 +1104,9 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID))
 			break;
 
+		WARN_ON((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
+			FH_RSCSR_RXQ_POS != rxq->id);
+
 		IWL_DEBUG_RX(trans,
 			     "cmd at offset %d: %s (0x%.2x, seq 0x%x)\n",
 			     rxcb._offset,
@@ -1201,10 +1244,19 @@ restart:
 			 */
 			u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
 
-			if (WARN(vid >= ARRAY_SIZE(trans_pcie->global_table),
-				 "Invalid rxb index from HW %u\n", (u32)vid))
+			if (WARN(!vid ||
+				 vid > ARRAY_SIZE(trans_pcie->global_table),
+				 "Invalid rxb index from HW %u\n", (u32)vid)) {
+				iwl_force_nmi(trans);
+				goto out;
+			}
+			rxb = trans_pcie->global_table[vid - 1];
+			if (WARN(rxb->invalid,
+				 "Invalid rxb from HW %u\n", (u32)vid)) {
+				iwl_force_nmi(trans);
 				goto out;
-			rxb = trans_pcie->global_table[vid];
+			}
+			rxb->invalid = true;
 		} else {
 			rxb = rxq->queue[i];
 			rxq->queue[i] = NULL;
@@ -1292,7 +1344,7 @@ static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
 	 * write 1 clear (W1C) register, meaning that it's being clear
 	 * by writing 1 to the bit.
 	 */
-	iwl_write_direct32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
+	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
 }
@@ -1484,7 +1536,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 		 * have anything to service
 		 */
 		if (test_bit(STATUS_INT_ENABLED, &trans->status))
-			iwl_enable_interrupts(trans);
+			_iwl_enable_interrupts(trans);
 		spin_unlock(&trans_pcie->irq_lock);
 		lock_map_release(&trans->sync_cmd_lockdep_map);
 		return IRQ_NONE;
@@ -1676,15 +1728,17 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 			   inta & ~trans_pcie->inta_mask);
 	}
 
+	spin_lock(&trans_pcie->irq_lock);
+	/* only Re-enable all interrupt if disabled by irq */
+	if (test_bit(STATUS_INT_ENABLED, &trans->status))
+		_iwl_enable_interrupts(trans);
 	/* we are loading the firmware, enable FH_TX interrupt only */
-	if (handled & CSR_INT_BIT_FH_TX)
+	else if (handled & CSR_INT_BIT_FH_TX)
 		iwl_enable_fw_load_int(trans);
-	/* only Re-enable all interrupt if disabled by irq */
-	else if (test_bit(STATUS_INT_ENABLED, &trans->status))
-		iwl_enable_interrupts(trans);
 	/* Re-enable RF_KILL if it occurred */
 	else if (handled & CSR_INT_BIT_RF_KILL)
 		iwl_enable_rfkill_int(trans);
+	spin_unlock(&trans_pcie->irq_lock);
 
 out:
 	lock_map_release(&trans->sync_cmd_lockdep_map);
@@ -1748,7 +1802,7 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
 		return;
 
 	spin_lock(&trans_pcie->irq_lock);
-	iwl_disable_interrupts(trans);
+	_iwl_disable_interrupts(trans);
 
 	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
@@ -1764,7 +1818,7 @@
 	trans_pcie->use_ict = true;
 	trans_pcie->ict_index = 0;
 	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
-	iwl_enable_interrupts(trans);
+	_iwl_enable_interrupts(trans);
 	spin_unlock(&trans_pcie->irq_lock);
 }
@@ -1805,19 +1859,19 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
 {
 	struct msix_entry *entry = dev_id;
 	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
 	struct iwl_trans *trans = trans_pcie->trans;
-	struct isr_statistics *isr_stats = isr_stats = &trans_pcie->isr_stats;
+	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
 	u32 inta_fh, inta_hw;
 
 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
 
 	spin_lock(&trans_pcie->irq_lock);
-	inta_fh = iwl_read_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
-	inta_hw = iwl_read_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
+	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
+	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
 	/*
 	 * Clear causes registers to avoid being handling the same cause.
 	 */
-	iwl_write_direct32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
-	iwl_write_direct32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
+	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh);
+	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
 	spin_unlock(&trans_pcie->irq_lock);
 
 	if (unlikely(!(inta_fh | inta_hw))) {
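For context on the RBD ownership-tracking change above (1-based vid plus an invalid flag checked in iwl_pcie_rx_handle): below is a minimal sketch in plain C, not the driver code itself. The structure and function names (rx_mem_buffer, lookup, POOL_SIZE) are simplified stand-ins chosen for illustration; only the vid/invalid logic mirrors the diff.

/*
 * Illustrative sketch (assumptions noted above): vid 0 now means "no
 * buffer", out-of-range vids are rejected, and a buffer the driver never
 * posted to hardware is caught via the invalid flag.
 */
#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 8

struct rx_mem_buffer {
	unsigned short vid;	/* 1-based index handed to hardware */
	bool invalid;		/* true while the driver owns the buffer */
};

static struct rx_mem_buffer pool[POOL_SIZE];
static struct rx_mem_buffer *global_table[POOL_SIZE];

static void init_pool(void)
{
	for (int i = 0; i < POOL_SIZE; i++) {
		global_table[i] = &pool[i];
		pool[i].vid = (unsigned short)(i + 1);	/* 1-based, 0 is reserved */
		pool[i].invalid = true;			/* not yet given to HW */
	}
}

/* Mirrors the validation in the diff: reject vid 0, out-of-range vids,
 * and buffers that were never posted; mark the buffer driver-owned again. */
static struct rx_mem_buffer *lookup(unsigned short vid)
{
	if (!vid || vid > POOL_SIZE)
		return NULL;		/* driver would WARN and force an NMI */

	struct rx_mem_buffer *rxb = global_table[vid - 1];

	if (rxb->invalid)
		return NULL;		/* HW returned a buffer we did not post */

	rxb->invalid = true;		/* buffer is back in driver hands */
	return rxb;
}

int main(void)
{
	init_pool();
	pool[2].invalid = false;	/* pretend vid 3 was posted to hardware */

	printf("vid 3 -> %p\n", (void *)lookup(3));	/* valid */
	printf("vid 0 -> %p\n", (void *)lookup(0));	/* rejected: reserved */
	printf("vid 5 -> %p\n", (void *)lookup(5));	/* rejected: never posted */
	return 0;
}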