Diffstat (limited to 'drivers/net/ethernet/qlogic/qed/qed_spq.c')
-rw-r--r--	drivers/net/ethernet/qlogic/qed/qed_spq.c	97
1 file changed, 62 insertions(+), 35 deletions(-)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 89469d5aa..d73456eab 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -27,6 +27,7 @@
 #include "qed_mcp.h"
 #include "qed_reg_addr.h"
 #include "qed_sp.h"
+#include "qed_sriov.h"
 
 /***************************************************************************
 * Structures & Definitions
@@ -212,19 +213,15 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
 	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
 		  DQ_XCM_CORE_SPQ_PROD_CMD);
 	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
-
-	/* validate producer is up to-date */
-	rmb();
-
 	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
 
-	/* do not reorder */
-	barrier();
+	/* make sure the SPQE is updated before the doorbell */
+	wmb();
 
 	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
 
 	/* make sure doorbell is rang */
-	mmiowb();
+	wmb();
 
 	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
 		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
@@ -242,10 +239,17 @@
 static int qed_async_event_completion(struct qed_hwfn *p_hwfn,
 				      struct event_ring_entry *p_eqe)
 {
-	DP_NOTICE(p_hwfn,
-		  "Unknown Async completion for protocol: %d\n",
-		  p_eqe->protocol_id);
-	return -EINVAL;
+	switch (p_eqe->protocol_id) {
+	case PROTOCOLID_COMMON:
+		return qed_sriov_eqe_event(p_hwfn,
+					   p_eqe->opcode,
+					   p_eqe->echo, &p_eqe->data);
+	default:
+		DP_NOTICE(p_hwfn,
+			  "Unknown Async completion for protocol: %d\n",
+			  p_eqe->protocol_id);
+		return -EINVAL;
+	}
 }
 
 /***************************************************************************
@@ -335,6 +339,7 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
 	if (qed_chain_alloc(p_hwfn->cdev,
 			    QED_CHAIN_USE_TO_PRODUCE,
 			    QED_CHAIN_MODE_PBL,
+			    QED_CHAIN_CNT_TYPE_U16,
 			    num_elem,
 			    sizeof(union event_ring_element),
 			    &p_eq->chain)) {
@@ -379,6 +384,9 @@ static int qed_cqe_completion(
 	struct eth_slow_path_rx_cqe *cqe,
 	enum protocol_type protocol)
 {
+	if (IS_VF(p_hwfn->cdev))
+		return 0;
+
 	/* @@@tmp - it's possible we'll eventually want to handle some
 	 * actual commands that can arrive here, but for now this is only
 	 * used to complete the ramrod using the echo value on the cqe
@@ -405,10 +413,10 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
 ***************************************************************************/
 void qed_spq_setup(struct qed_hwfn *p_hwfn)
 {
-	struct qed_spq		*p_spq	= p_hwfn->p_spq;
-	struct qed_spq_entry	*p_virt = NULL;
-	dma_addr_t		p_phys	= 0;
-	unsigned int		i	= 0;
+	struct qed_spq *p_spq = p_hwfn->p_spq;
+	struct qed_spq_entry *p_virt = NULL;
+	dma_addr_t p_phys = 0;
+	u32 i, capacity;
 
 	INIT_LIST_HEAD(&p_spq->pending);
 	INIT_LIST_HEAD(&p_spq->completion_pending);
@@ -420,7 +428,8 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
 	p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
 	p_virt = p_spq->p_virt;
 
-	for (i = 0; i < p_spq->chain.capacity; i++) {
+	capacity = qed_chain_get_capacity(&p_spq->chain);
+	for (i = 0; i < capacity; i++) {
 		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
 
 		list_add_tail(&p_virt->list, &p_spq->free_pool);
@@ -448,9 +457,10 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
 
 int qed_spq_alloc(struct qed_hwfn *p_hwfn)
 {
-	struct qed_spq		*p_spq	= NULL;
-	dma_addr_t		p_phys	= 0;
-	struct qed_spq_entry	*p_virt = NULL;
+	struct qed_spq_entry *p_virt = NULL;
+	struct qed_spq *p_spq = NULL;
+	dma_addr_t p_phys = 0;
+	u32 capacity;
 
 	/* SPQ struct */
 	p_spq =
@@ -464,6 +474,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
 	if (qed_chain_alloc(p_hwfn->cdev,
 			    QED_CHAIN_USE_TO_PRODUCE,
 			    QED_CHAIN_MODE_SINGLE,
+			    QED_CHAIN_CNT_TYPE_U16,
 			    0, /* N/A when the mode is SINGLE */
 			    sizeof(struct slow_path_element),
 			    &p_spq->chain)) {
@@ -472,11 +483,11 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
 	}
 
 	/* allocate and fill the SPQ elements (incl. ramrod data list) */
+	capacity = qed_chain_get_capacity(&p_spq->chain);
 	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
-				    p_spq->chain.capacity *
+				    capacity *
 				    sizeof(struct qed_spq_entry),
-				    &p_phys,
-				    GFP_KERNEL);
+				    &p_phys, GFP_KERNEL);
 
 	if (!p_virt)
 		goto spq_allocate_fail;
@@ -496,16 +507,18 @@ spq_allocate_fail:
 void qed_spq_free(struct qed_hwfn *p_hwfn)
 {
 	struct qed_spq *p_spq = p_hwfn->p_spq;
+	u32 capacity;
 
 	if (!p_spq)
 		return;
 
-	if (p_spq->p_virt)
+	if (p_spq->p_virt) {
+		capacity = qed_chain_get_capacity(&p_spq->chain);
 		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
-				  p_spq->chain.capacity *
+				  capacity *
 				  sizeof(struct qed_spq_entry),
-				  p_spq->p_virt,
-				  p_spq->p_phys);
+				  p_spq->p_virt, p_spq->p_phys);
+	}
 
 	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
 	kfree(p_spq);
@@ -603,7 +616,9 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
 
 		*p_en2 = *p_ent;
 
-		kfree(p_ent);
+		/* EBLOCK responsible to free the allocated p_ent */
+		if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
+			kfree(p_ent);
 
 		p_ent = p_en2;
 	}
@@ -738,6 +753,15 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
 		 * Thus, after gaining the answer perform the cleanup here.
 		 */
 		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+
+		if (p_ent->queue == &p_spq->unlimited_pending) {
+			/* This is an allocated p_ent which does not need to
+			 * return to pool.
+			 */
+			kfree(p_ent);
+			return rc;
+		}
+
 		if (rc)
 			goto spq_post_fail2;
 
@@ -791,13 +815,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
 			 * in a bitmap and increasing the chain consumer only
 			 * for the first successive completed entries.
 			 */
-			bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);
+			__set_bit(pos, p_spq->p_comp_bitmap);
 
 			while (test_bit(p_spq->comp_bitmap_idx,
 					p_spq->p_comp_bitmap)) {
-				bitmap_clear(p_spq->p_comp_bitmap,
-					     p_spq->comp_bitmap_idx,
-					     SPQ_RING_SIZE);
+				__clear_bit(p_spq->comp_bitmap_idx,
+					    p_spq->p_comp_bitmap);
 				p_spq->comp_bitmap_idx++;
 				qed_chain_return_produced(&p_spq->chain);
 			}
@@ -833,8 +856,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
 		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
 					fw_return_code);
 
-	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
-		/* EBLOCK is responsible for freeing its own entry */
+	if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
+	    (found->queue == &p_spq->unlimited_pending))
+		/* EBLOCK is responsible for returning its own entry into the
+		 * free list, unless it originally added the entry into the
+		 * unlimited pending list.
+		 */
 		qed_spq_return_entry(p_hwfn, found);
 
 	/* Attempt to post pending requests */
@@ -860,9 +887,9 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
 	if (qed_chain_alloc(p_hwfn->cdev,
 			    QED_CHAIN_USE_TO_PRODUCE,
 			    QED_CHAIN_MODE_PBL,
+			    QED_CHAIN_CNT_TYPE_U16,
 			    QED_CHAIN_PAGE_SIZE / 0x80,
-			    0x80,
-			    &p_consq->chain)) {
+			    0x80, &p_consq->chain)) {
 		DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
 		goto consq_allocate_fail;
 	}
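The barrier rework in qed_spq_hw_post() follows the usual descriptor/doorbell ordering rule: the SPQ element must be visible in DMA memory before the device can observe the doorbell, and the doorbell MMIO write itself is flushed before execution continues. The old rmb()/barrier() pair ordered reads and the compiler only; the new wmb() orders the write that actually matters, and a second wmb() replaces the dropped mmiowb(). A minimal sketch of the pattern with a hypothetical helper (the function name and parameters are illustrative, not from the driver):

#include <linux/io.h>	/* writel(), wmb() */

/* Illustrative only: the ordering qed_spq_hw_post() enforces after this
 * patch. db_addr stands in for the offset computed by qed_db_addr(),
 * db_data for the core_db_data word the driver builds.
 */
static void example_ring_doorbell(u32 __iomem *db_addr, u32 db_data)
{
	/* The SPQ element has already been written into the DMA ring. */

	/* Order the element write before the doorbell write so the
	 * device never fetches a stale element.
	 */
	wmb();

	/* Ring the doorbell; the driver's DOORBELL() macro reduces to
	 * an MMIO write of *(u32 *)&db like this one.
	 */
	writel(db_data, db_addr);

	/* Flush the doorbell write before continuing, where mmiowb()
	 * used to be.
	 */
	wmb();
}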
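The bitmap hunk in qed_spq_completion() is more than style: bitmap_set() and bitmap_clear() take a count of bits, so passing SPQ_RING_SIZE marked and cleared a whole range starting at the given position rather than the single slot that actually completed, which looks like the motivation for the change. The __set_bit()/__clear_bit() calls record exactly one completion, and the loop then releases ring credits only for consecutive completed slots at the consumer index. A self-contained sketch of that logic under a simplified stand-in structure (all names here are illustrative; the driver wraps indices with its own SPQ_RING_SIZE macros):

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_RING_SIZE 256	/* stands in for SPQ_RING_SIZE */

struct example_spq {
	DECLARE_BITMAP(comp_bitmap, EXAMPLE_RING_SIZE);
	u32 comp_bitmap_idx;	/* first slot not yet consumed */
};

/* Record completion of the slot identified by 'echo' and return how
 * many slots may be handed back to the chain; the driver calls
 * qed_chain_return_produced() once per such slot.
 */
static unsigned int example_complete(struct example_spq *p_spq, u16 echo)
{
	unsigned int pos = echo % EXAMPLE_RING_SIZE;
	unsigned int returned = 0;

	/* One bit only: the fix over bitmap_set(..., SPQ_RING_SIZE). */
	__set_bit(pos, p_spq->comp_bitmap);

	/* Walk forward over consecutive completed slots; stop at the
	 * first gap left by an out-of-order completion.
	 */
	while (test_bit(p_spq->comp_bitmap_idx % EXAMPLE_RING_SIZE,
			p_spq->comp_bitmap)) {
		__clear_bit(p_spq->comp_bitmap_idx % EXAMPLE_RING_SIZE,
			    p_spq->comp_bitmap);
		p_spq->comp_bitmap_idx++;
		returned++;
	}

	return returned;
}

Only consecutive slots are released because the chain consumer is a single index; a completed slot behind an incomplete one must wait until the gap fills.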
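The remaining hunks (qed_spq_add_entry(), qed_spq_post(), and the completion path) all answer one question: who owns an SPQ entry once it completes. Reading the diff, the rule the patch converges on can be stated as a single predicate. This is an interpretive sketch, not driver code, though the types and field names come from the driver:

#include <linux/types.h>
#include "qed_sp.h"	/* struct qed_spq, qed_spq_entry, QED_SPQ_MODE_* */

/* Illustrative reading of the patch: should qed_spq_completion() hand
 * the completed entry back to the free pool? EBLOCK posters normally
 * return or free their own entry once qed_spq_block() finishes; the
 * exception is an entry that traveled the unlimited-pending path, where
 * what completes is a copy drawn from the free pool (the original
 * allocated entry is kfree()d in qed_spq_post() instead).
 */
static bool example_completion_returns_entry(const struct qed_spq *p_spq,
					     const struct qed_spq_entry *p_ent)
{
	return p_ent->comp_mode != QED_SPQ_MODE_EBLOCK ||
	       p_ent->queue == &p_spq->unlimited_pending;
}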