obj-$(CONFIG_IAVF) += iavf.o
iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o \
- iavf_txrx.o iavf_common.o i40e_adminq.o iavf_client.o
+ iavf_txrx.o iavf_common.o iavf_adminq.o iavf_client.o
--- a/drivers/net/ethernet/intel/iavf/i40e_adminq.c
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#include "iavf_status.h"
-#include "iavf_type.h"
-#include "iavf_register.h"
-#include "i40e_adminq.h"
-#include "iavf_prototype.h"
-
-/**
- * i40e_adminq_init_regs - Initialize AdminQ registers
- * @hw: pointer to the hardware structure
- *
- * This assumes the alloc_asq and alloc_arq functions have already been called
- **/
-static void i40e_adminq_init_regs(struct iavf_hw *hw)
-{
- /* set head and tail registers in our local struct */
- hw->aq.asq.tail = IAVF_VF_ATQT1;
- hw->aq.asq.head = IAVF_VF_ATQH1;
- hw->aq.asq.len = IAVF_VF_ATQLEN1;
- hw->aq.asq.bal = IAVF_VF_ATQBAL1;
- hw->aq.asq.bah = IAVF_VF_ATQBAH1;
- hw->aq.arq.tail = IAVF_VF_ARQT1;
- hw->aq.arq.head = IAVF_VF_ARQH1;
- hw->aq.arq.len = IAVF_VF_ARQLEN1;
- hw->aq.arq.bal = IAVF_VF_ARQBAL1;
- hw->aq.arq.bah = IAVF_VF_ARQBAH1;
-}
-
-/**
- * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
- * @hw: pointer to the hardware structure
- **/
-static iavf_status i40e_alloc_adminq_asq_ring(struct iavf_hw *hw)
-{
- iavf_status ret_code;
-
- ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
- i40e_mem_atq_ring,
- (hw->aq.num_asq_entries *
- sizeof(struct i40e_aq_desc)),
- IAVF_ADMINQ_DESC_ALIGNMENT);
- if (ret_code)
- return ret_code;
-
- ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
- (hw->aq.num_asq_entries *
- sizeof(struct i40e_asq_cmd_details)));
- if (ret_code) {
- iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
- return ret_code;
- }
-
- return ret_code;
-}
-
-/**
- * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
- * @hw: pointer to the hardware structure
- **/
-static iavf_status i40e_alloc_adminq_arq_ring(struct iavf_hw *hw)
-{
- iavf_status ret_code;
-
- ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
- i40e_mem_arq_ring,
- (hw->aq.num_arq_entries *
- sizeof(struct i40e_aq_desc)),
- IAVF_ADMINQ_DESC_ALIGNMENT);
-
- return ret_code;
-}
-
-/**
- * i40e_free_adminq_asq - Free Admin Queue send rings
- * @hw: pointer to the hardware structure
- *
- * This assumes the posted send buffers have already been cleaned
- * and de-allocated
- **/
-static void i40e_free_adminq_asq(struct iavf_hw *hw)
-{
- iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
-}
-
-/**
- * i40e_free_adminq_arq - Free Admin Queue receive rings
- * @hw: pointer to the hardware structure
- *
- * This assumes the posted receive buffers have already been cleaned
- * and de-allocated
- **/
-static void i40e_free_adminq_arq(struct iavf_hw *hw)
-{
- iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
-}
-
-/**
- * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
- * @hw: pointer to the hardware structure
- **/
-static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw)
-{
- struct i40e_aq_desc *desc;
- struct iavf_dma_mem *bi;
- iavf_status ret_code;
- int i;
-
- /* We'll be allocating the buffer info memory first, then we can
- * allocate the mapped buffers for the event processing
- */
-
- /* buffer_info structures do not need alignment */
- ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
- (hw->aq.num_arq_entries *
- sizeof(struct iavf_dma_mem)));
- if (ret_code)
- goto alloc_arq_bufs;
- hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;
-
- /* allocate the mapped buffers */
- for (i = 0; i < hw->aq.num_arq_entries; i++) {
- bi = &hw->aq.arq.r.arq_bi[i];
- ret_code = iavf_allocate_dma_mem(hw, bi,
- i40e_mem_arq_buf,
- hw->aq.arq_buf_size,
- IAVF_ADMINQ_DESC_ALIGNMENT);
- if (ret_code)
- goto unwind_alloc_arq_bufs;
-
- /* now configure the descriptors for use */
- desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);
-
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
- if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
- desc->opcode = 0;
- /* This is in accordance with Admin queue design, there is no
- * register for buffer size configuration
- */
- desc->datalen = cpu_to_le16((u16)bi->size);
- desc->retval = 0;
- desc->cookie_high = 0;
- desc->cookie_low = 0;
- desc->params.external.addr_high =
- cpu_to_le32(upper_32_bits(bi->pa));
- desc->params.external.addr_low =
- cpu_to_le32(lower_32_bits(bi->pa));
- desc->params.external.param0 = 0;
- desc->params.external.param1 = 0;
- }
-
-alloc_arq_bufs:
- return ret_code;
-
-unwind_alloc_arq_bufs:
- /* don't try to free the one that failed... */
- i--;
- for (; i >= 0; i--)
- iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
- iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
-
- return ret_code;
-}
-
-/**
- * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
- * @hw: pointer to the hardware structure
- **/
-static iavf_status i40e_alloc_asq_bufs(struct iavf_hw *hw)
-{
- struct iavf_dma_mem *bi;
- iavf_status ret_code;
- int i;
-
- /* No mapped memory needed yet, just the buffer info structures */
- ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
- (hw->aq.num_asq_entries *
- sizeof(struct iavf_dma_mem)));
- if (ret_code)
- goto alloc_asq_bufs;
- hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;
-
- /* allocate the mapped buffers */
- for (i = 0; i < hw->aq.num_asq_entries; i++) {
- bi = &hw->aq.asq.r.asq_bi[i];
- ret_code = iavf_allocate_dma_mem(hw, bi,
- i40e_mem_asq_buf,
- hw->aq.asq_buf_size,
- IAVF_ADMINQ_DESC_ALIGNMENT);
- if (ret_code)
- goto unwind_alloc_asq_bufs;
- }
-alloc_asq_bufs:
- return ret_code;
-
-unwind_alloc_asq_bufs:
- /* don't try to free the one that failed... */
- i--;
- for (; i >= 0; i--)
- iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
- iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
-
- return ret_code;
-}
-
-/**
- * i40e_free_arq_bufs - Free receive queue buffer info elements
- * @hw: pointer to the hardware structure
- **/
-static void i40e_free_arq_bufs(struct iavf_hw *hw)
-{
- int i;
-
- /* free descriptors */
- for (i = 0; i < hw->aq.num_arq_entries; i++)
- iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
-
- /* free the descriptor memory */
- iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
-
- /* free the dma header */
- iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
-}
-
-/**
- * i40e_free_asq_bufs - Free send queue buffer info elements
- * @hw: pointer to the hardware structure
- **/
-static void i40e_free_asq_bufs(struct iavf_hw *hw)
-{
- int i;
-
- /* only unmap if the address is non-NULL */
- for (i = 0; i < hw->aq.num_asq_entries; i++)
- if (hw->aq.asq.r.asq_bi[i].pa)
- iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
-
- /* free the buffer info list */
- iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
-
- /* free the descriptor memory */
- iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
-
- /* free the dma header */
- iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
-}
-
-/**
- * i40e_config_asq_regs - configure ASQ registers
- * @hw: pointer to the hardware structure
- *
- * Configure base address and length registers for the transmit queue
- **/
-static iavf_status i40e_config_asq_regs(struct iavf_hw *hw)
-{
- iavf_status ret_code = 0;
- u32 reg = 0;
-
- /* Clear Head and Tail */
- wr32(hw, hw->aq.asq.head, 0);
- wr32(hw, hw->aq.asq.tail, 0);
-
- /* set starting point */
- wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
- IAVF_VF_ATQLEN1_ATQENABLE_MASK));
- wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
- wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
-
- /* Check one register to verify that config was applied */
- reg = rd32(hw, hw->aq.asq.bal);
- if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
- ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
-
- return ret_code;
-}
-
-/**
- * i40e_config_arq_regs - ARQ register configuration
- * @hw: pointer to the hardware structure
- *
- * Configure base address and length registers for the receive (event queue)
- **/
-static iavf_status i40e_config_arq_regs(struct iavf_hw *hw)
-{
- iavf_status ret_code = 0;
- u32 reg = 0;
-
- /* Clear Head and Tail */
- wr32(hw, hw->aq.arq.head, 0);
- wr32(hw, hw->aq.arq.tail, 0);
-
- /* set starting point */
- wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
- IAVF_VF_ARQLEN1_ARQENABLE_MASK));
- wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
- wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
-
- /* Update tail in the HW to post pre-allocated buffers */
- wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
-
- /* Check one register to verify that config was applied */
- reg = rd32(hw, hw->aq.arq.bal);
- if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
- ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
-
- return ret_code;
-}
-
-/**
- * i40e_init_asq - main initialization routine for ASQ
- * @hw: pointer to the hardware structure
- *
- * This is the main initialization routine for the Admin Send Queue
- * Prior to calling this function, drivers *MUST* set the following fields
- * in the hw->aq structure:
- * - hw->aq.num_asq_entries
- * - hw->aq.arq_buf_size
- *
- * Do *NOT* hold the lock when calling this as the memory allocation routines
- * called are not going to be atomic context safe
- **/
-static iavf_status i40e_init_asq(struct iavf_hw *hw)
-{
- iavf_status ret_code = 0;
-
- if (hw->aq.asq.count > 0) {
- /* queue already initialized */
- ret_code = I40E_ERR_NOT_READY;
- goto init_adminq_exit;
- }
-
- /* verify input for valid configuration */
- if ((hw->aq.num_asq_entries == 0) ||
- (hw->aq.asq_buf_size == 0)) {
- ret_code = I40E_ERR_CONFIG;
- goto init_adminq_exit;
- }
-
- hw->aq.asq.next_to_use = 0;
- hw->aq.asq.next_to_clean = 0;
-
- /* allocate the ring memory */
- ret_code = i40e_alloc_adminq_asq_ring(hw);
- if (ret_code)
- goto init_adminq_exit;
-
- /* allocate buffers in the rings */
- ret_code = i40e_alloc_asq_bufs(hw);
- if (ret_code)
- goto init_adminq_free_rings;
-
- /* initialize base registers */
- ret_code = i40e_config_asq_regs(hw);
- if (ret_code)
- goto init_adminq_free_rings;
-
- /* success! */
- hw->aq.asq.count = hw->aq.num_asq_entries;
- goto init_adminq_exit;
-
-init_adminq_free_rings:
- i40e_free_adminq_asq(hw);
-
-init_adminq_exit:
- return ret_code;
-}
-
-/**
- * i40e_init_arq - initialize ARQ
- * @hw: pointer to the hardware structure
- *
- * The main initialization routine for the Admin Receive (Event) Queue.
- * Prior to calling this function, drivers *MUST* set the following fields
- * in the hw->aq structure:
- * - hw->aq.num_asq_entries
- * - hw->aq.arq_buf_size
- *
- * Do *NOT* hold the lock when calling this as the memory allocation routines
- * called are not going to be atomic context safe
- **/
-static iavf_status i40e_init_arq(struct iavf_hw *hw)
-{
- iavf_status ret_code = 0;
-
- if (hw->aq.arq.count > 0) {
- /* queue already initialized */
- ret_code = I40E_ERR_NOT_READY;
- goto init_adminq_exit;
- }
-
- /* verify input for valid configuration */
- if ((hw->aq.num_arq_entries == 0) ||
- (hw->aq.arq_buf_size == 0)) {
- ret_code = I40E_ERR_CONFIG;
- goto init_adminq_exit;
- }
-
- hw->aq.arq.next_to_use = 0;
- hw->aq.arq.next_to_clean = 0;
-
- /* allocate the ring memory */
- ret_code = i40e_alloc_adminq_arq_ring(hw);
- if (ret_code)
- goto init_adminq_exit;
-
- /* allocate buffers in the rings */
- ret_code = i40e_alloc_arq_bufs(hw);
- if (ret_code)
- goto init_adminq_free_rings;
-
- /* initialize base registers */
- ret_code = i40e_config_arq_regs(hw);
- if (ret_code)
- goto init_adminq_free_rings;
-
- /* success! */
- hw->aq.arq.count = hw->aq.num_arq_entries;
- goto init_adminq_exit;
-
-init_adminq_free_rings:
- i40e_free_adminq_arq(hw);
-
-init_adminq_exit:
- return ret_code;
-}
-
-/**
- * i40e_shutdown_asq - shutdown the ASQ
- * @hw: pointer to the hardware structure
- *
- * The main shutdown routine for the Admin Send Queue
- **/
-static iavf_status i40e_shutdown_asq(struct iavf_hw *hw)
-{
- iavf_status ret_code = 0;
-
- mutex_lock(&hw->aq.asq_mutex);
-
- if (hw->aq.asq.count == 0) {
- ret_code = I40E_ERR_NOT_READY;
- goto shutdown_asq_out;
- }
-
- /* Stop firmware AdminQ processing */
- wr32(hw, hw->aq.asq.head, 0);
- wr32(hw, hw->aq.asq.tail, 0);
- wr32(hw, hw->aq.asq.len, 0);
- wr32(hw, hw->aq.asq.bal, 0);
- wr32(hw, hw->aq.asq.bah, 0);
-
- hw->aq.asq.count = 0; /* to indicate uninitialized queue */
-
- /* free ring buffers */
- i40e_free_asq_bufs(hw);
-
-shutdown_asq_out:
- mutex_unlock(&hw->aq.asq_mutex);
- return ret_code;
-}
-
-/**
- * i40e_shutdown_arq - shutdown ARQ
- * @hw: pointer to the hardware structure
- *
- * The main shutdown routine for the Admin Receive Queue
- **/
-static iavf_status i40e_shutdown_arq(struct iavf_hw *hw)
-{
- iavf_status ret_code = 0;
-
- mutex_lock(&hw->aq.arq_mutex);
-
- if (hw->aq.arq.count == 0) {
- ret_code = I40E_ERR_NOT_READY;
- goto shutdown_arq_out;
- }
-
- /* Stop firmware AdminQ processing */
- wr32(hw, hw->aq.arq.head, 0);
- wr32(hw, hw->aq.arq.tail, 0);
- wr32(hw, hw->aq.arq.len, 0);
- wr32(hw, hw->aq.arq.bal, 0);
- wr32(hw, hw->aq.arq.bah, 0);
-
- hw->aq.arq.count = 0; /* to indicate uninitialized queue */
-
- /* free ring buffers */
- i40e_free_arq_bufs(hw);
-
-shutdown_arq_out:
- mutex_unlock(&hw->aq.arq_mutex);
- return ret_code;
-}
-
-/**
- * iavf_init_adminq - main initialization routine for Admin Queue
- * @hw: pointer to the hardware structure
- *
- * Prior to calling this function, drivers *MUST* set the following fields
- * in the hw->aq structure:
- * - hw->aq.num_asq_entries
- * - hw->aq.num_arq_entries
- * - hw->aq.arq_buf_size
- * - hw->aq.asq_buf_size
- **/
-iavf_status iavf_init_adminq(struct iavf_hw *hw)
-{
- iavf_status ret_code;
-
- /* verify input for valid configuration */
- if ((hw->aq.num_arq_entries == 0) ||
- (hw->aq.num_asq_entries == 0) ||
- (hw->aq.arq_buf_size == 0) ||
- (hw->aq.asq_buf_size == 0)) {
- ret_code = I40E_ERR_CONFIG;
- goto init_adminq_exit;
- }
-
- /* Set up register offsets */
- i40e_adminq_init_regs(hw);
-
- /* setup ASQ command write back timeout */
- hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
-
- /* allocate the ASQ */
- ret_code = i40e_init_asq(hw);
- if (ret_code)
- goto init_adminq_destroy_locks;
-
- /* allocate the ARQ */
- ret_code = i40e_init_arq(hw);
- if (ret_code)
- goto init_adminq_free_asq;
-
- /* success! */
- goto init_adminq_exit;
-
-init_adminq_free_asq:
- i40e_shutdown_asq(hw);
-init_adminq_destroy_locks:
-
-init_adminq_exit:
- return ret_code;
-}
-
-/**
- * iavf_shutdown_adminq - shutdown routine for the Admin Queue
- * @hw: pointer to the hardware structure
- **/
-iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
-{
- iavf_status ret_code = 0;
-
- if (iavf_check_asq_alive(hw))
- iavf_aq_queue_shutdown(hw, true);
-
- i40e_shutdown_asq(hw);
- i40e_shutdown_arq(hw);
-
- return ret_code;
-}
-
-/**
- * i40e_clean_asq - cleans Admin send queue
- * @hw: pointer to the hardware structure
- *
- * returns the number of free desc
- **/
-static u16 i40e_clean_asq(struct iavf_hw *hw)
-{
- struct iavf_adminq_ring *asq = &hw->aq.asq;
- struct i40e_asq_cmd_details *details;
- u16 ntc = asq->next_to_clean;
- struct i40e_aq_desc desc_cb;
- struct i40e_aq_desc *desc;
-
- desc = IAVF_ADMINQ_DESC(*asq, ntc);
- details = I40E_ADMINQ_DETAILS(*asq, ntc);
- while (rd32(hw, hw->aq.asq.head) != ntc) {
- iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
- "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
-
- if (details->callback) {
- I40E_ADMINQ_CALLBACK cb_func =
- (I40E_ADMINQ_CALLBACK)details->callback;
- desc_cb = *desc;
- cb_func(hw, &desc_cb);
- }
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
- memset((void *)details, 0,
- sizeof(struct i40e_asq_cmd_details));
- ntc++;
- if (ntc == asq->count)
- ntc = 0;
- desc = IAVF_ADMINQ_DESC(*asq, ntc);
- details = I40E_ADMINQ_DETAILS(*asq, ntc);
- }
-
- asq->next_to_clean = ntc;
-
- return IAVF_DESC_UNUSED(asq);
-}
-
-/**
- * iavf_asq_done - check if FW has processed the Admin Send Queue
- * @hw: pointer to the hw struct
- *
- * Returns true if the firmware has processed all descriptors on the
- * admin send queue. Returns false if there are still requests pending.
- **/
-bool iavf_asq_done(struct iavf_hw *hw)
-{
- /* AQ designers suggest use of head for better
- * timing reliability than DD bit
- */
- return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
-}
-
-/**
- * iavf_asq_send_command - send command to Admin Queue
- * @hw: pointer to the hw struct
- * @desc: prefilled descriptor describing the command (non DMA mem)
- * @buff: buffer to use for indirect commands
- * @buff_size: size of buffer for indirect commands
- * @cmd_details: pointer to command details structure
- *
- * This is the main send command driver routine for the Admin Queue send
- * queue. It runs the queue, cleans the queue, etc
- **/
-iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
- void *buff, /* can be NULL */
- u16 buff_size,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct iavf_dma_mem *dma_buff = NULL;
- struct i40e_asq_cmd_details *details;
- struct i40e_aq_desc *desc_on_ring;
- bool cmd_completed = false;
- iavf_status status = 0;
- u16 retval = 0;
- u32 val = 0;
-
- mutex_lock(&hw->aq.asq_mutex);
-
- if (hw->aq.asq.count == 0) {
- iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
- "AQTX: Admin queue not initialized.\n");
- status = I40E_ERR_QUEUE_EMPTY;
- goto asq_send_command_error;
- }
-
- hw->aq.asq_last_status = I40E_AQ_RC_OK;
-
- val = rd32(hw, hw->aq.asq.head);
- if (val >= hw->aq.num_asq_entries) {
- iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
- "AQTX: head overrun at %d\n", val);
- status = I40E_ERR_QUEUE_EMPTY;
- goto asq_send_command_error;
- }
-
- details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
- if (cmd_details) {
- *details = *cmd_details;
-
- /* If the cmd_details are defined copy the cookie. The
- * cpu_to_le32 is not needed here because the data is ignored
- * by the FW, only used by the driver
- */
- if (details->cookie) {
- desc->cookie_high =
- cpu_to_le32(upper_32_bits(details->cookie));
- desc->cookie_low =
- cpu_to_le32(lower_32_bits(details->cookie));
- }
- } else {
- memset(details, 0, sizeof(struct i40e_asq_cmd_details));
- }
-
- /* clear requested flags and then set additional flags if defined */
- desc->flags &= ~cpu_to_le16(details->flags_dis);
- desc->flags |= cpu_to_le16(details->flags_ena);
-
- if (buff_size > hw->aq.asq_buf_size) {
- iavf_debug(hw,
- IAVF_DEBUG_AQ_MESSAGE,
- "AQTX: Invalid buffer size: %d.\n",
- buff_size);
- status = I40E_ERR_INVALID_SIZE;
- goto asq_send_command_error;
- }
-
- if (details->postpone && !details->async) {
- iavf_debug(hw,
- IAVF_DEBUG_AQ_MESSAGE,
- "AQTX: Async flag not set along with postpone flag");
- status = I40E_ERR_PARAM;
- goto asq_send_command_error;
- }
-
- /* call clean and check queue available function to reclaim the
- * descriptors that were processed by FW, the function returns the
- * number of desc available
- */
- /* the clean function called here could be called in a separate thread
- * in case of asynchronous completions
- */
- if (i40e_clean_asq(hw) == 0) {
- iavf_debug(hw,
- IAVF_DEBUG_AQ_MESSAGE,
- "AQTX: Error queue is full.\n");
- status = I40E_ERR_ADMIN_QUEUE_FULL;
- goto asq_send_command_error;
- }
-
- /* initialize the temp desc pointer with the right desc */
- desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
-
- /* if the desc is available copy the temp desc to the right place */
- *desc_on_ring = *desc;
-
- /* if buff is not NULL assume indirect command */
- if (buff) {
- dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use];
- /* copy the user buff into the respective DMA buff */
- memcpy(dma_buff->va, buff, buff_size);
- desc_on_ring->datalen = cpu_to_le16(buff_size);
-
- /* Update the address values in the desc with the pa value
- * for respective buffer
- */
- desc_on_ring->params.external.addr_high =
- cpu_to_le32(upper_32_bits(dma_buff->pa));
- desc_on_ring->params.external.addr_low =
- cpu_to_le32(lower_32_bits(dma_buff->pa));
- }
-
- /* bump the tail */
- iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
- iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
- buff, buff_size);
- (hw->aq.asq.next_to_use)++;
- if (hw->aq.asq.next_to_use == hw->aq.asq.count)
- hw->aq.asq.next_to_use = 0;
- if (!details->postpone)
- wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
-
- /* if cmd_details are not defined or async flag is not set,
- * we need to wait for desc write back
- */
- if (!details->async && !details->postpone) {
- u32 total_delay = 0;
-
- do {
- /* AQ designers suggest use of head for better
- * timing reliability than DD bit
- */
- if (iavf_asq_done(hw))
- break;
- udelay(50);
- total_delay += 50;
- } while (total_delay < hw->aq.asq_cmd_timeout);
- }
-
- /* if ready, copy the desc back to temp */
- if (iavf_asq_done(hw)) {
- *desc = *desc_on_ring;
- if (buff)
- memcpy(buff, dma_buff->va, buff_size);
- retval = le16_to_cpu(desc->retval);
- if (retval != 0) {
- iavf_debug(hw,
- IAVF_DEBUG_AQ_MESSAGE,
- "AQTX: Command completed with error 0x%X.\n",
- retval);
-
- /* strip off FW internal code */
- retval &= 0xff;
- }
- cmd_completed = true;
- if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
- status = 0;
- else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
- status = I40E_ERR_NOT_READY;
- else
- status = I40E_ERR_ADMIN_QUEUE_ERROR;
- hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
- }
-
- iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
- "AQTX: desc and buffer writeback:\n");
- iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
-
- /* save writeback aq if requested */
- if (details->wb_desc)
- *details->wb_desc = *desc_on_ring;
-
- /* update the error if time out occurred */
- if ((!cmd_completed) &&
- (!details->async && !details->postpone)) {
- if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
- iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
- "AQTX: AQ Critical error.\n");
- status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
- } else {
- iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
- "AQTX: Writeback timeout.\n");
- status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
- }
- }
-
-asq_send_command_error:
- mutex_unlock(&hw->aq.asq_mutex);
- return status;
-}
-
-/**
- * iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
- * @desc: pointer to the temp descriptor (non DMA mem)
- * @opcode: the opcode can be used to decide which flags to turn off or on
- *
- * Fill the desc with default values
- **/
-void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode)
-{
- /* zero out the desc */
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
- desc->opcode = cpu_to_le16(opcode);
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
-}
-
-/**
- * iavf_clean_arq_element
- * @hw: pointer to the hw struct
- * @e: event info from the receive descriptor, includes any buffers
- * @pending: number of events that could be left to process
- *
- * This function cleans one Admin Receive Queue element and returns
- * the contents through e. It can also return how many events are
- * left to process through 'pending'
- **/
-iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
- struct i40e_arq_event_info *e,
- u16 *pending)
-{
- u16 ntc = hw->aq.arq.next_to_clean;
- struct i40e_aq_desc *desc;
- iavf_status ret_code = 0;
- struct iavf_dma_mem *bi;
- u16 desc_idx;
- u16 datalen;
- u16 flags;
- u16 ntu;
-
- /* pre-clean the event info */
- memset(&e->desc, 0, sizeof(e->desc));
-
- /* take the lock before we start messing with the ring */
- mutex_lock(&hw->aq.arq_mutex);
-
- if (hw->aq.arq.count == 0) {
- iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
- "AQRX: Admin queue not initialized.\n");
- ret_code = I40E_ERR_QUEUE_EMPTY;
- goto clean_arq_element_err;
- }
-
- /* set next_to_use to head */
- ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
- if (ntu == ntc) {
- /* nothing to do - shouldn't need to update ring's values */
- ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
- goto clean_arq_element_out;
- }
-
- /* now clean the next descriptor */
- desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
- desc_idx = ntc;
-
- hw->aq.arq_last_status =
- (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
- flags = le16_to_cpu(desc->flags);
- if (flags & I40E_AQ_FLAG_ERR) {
- ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
- iavf_debug(hw,
- IAVF_DEBUG_AQ_MESSAGE,
- "AQRX: Event received with error 0x%X.\n",
- hw->aq.arq_last_status);
- }
-
- e->desc = *desc;
- datalen = le16_to_cpu(desc->datalen);
- e->msg_len = min(datalen, e->buf_len);
- if (e->msg_buf && (e->msg_len != 0))
- memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
- e->msg_len);
-
- iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
- iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
- hw->aq.arq_buf_size);
-
- /* Restore the original datalen and buffer address in the desc,
- * FW updates datalen to indicate the event message
- * size
- */
- bi = &hw->aq.arq.r.arq_bi[ntc];
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
-
- desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
- if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
- desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
- desc->datalen = cpu_to_le16((u16)bi->size);
- desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
- desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
-
- /* set tail = the last cleaned desc index. */
- wr32(hw, hw->aq.arq.tail, ntc);
- /* ntc is updated to tail + 1 */
- ntc++;
- if (ntc == hw->aq.num_arq_entries)
- ntc = 0;
- hw->aq.arq.next_to_clean = ntc;
- hw->aq.arq.next_to_use = ntu;
-
-clean_arq_element_out:
- /* Set pending if needed, unlock and return */
- if (pending)
- *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
-
-clean_arq_element_err:
- mutex_unlock(&hw->aq.arq_mutex);
-
- return ret_code;
-}
--- a/drivers/net/ethernet/intel/iavf/i40e_adminq.h
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _IAVF_ADMINQ_H_
-#define _IAVF_ADMINQ_H_
-
-#include "iavf_osdep.h"
-#include "iavf_status.h"
-#include "i40e_adminq_cmd.h"
-
-#define IAVF_ADMINQ_DESC(R, i) \
- (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
-
-#define IAVF_ADMINQ_DESC_ALIGNMENT 4096
-
-struct iavf_adminq_ring {
- struct iavf_virt_mem dma_head; /* space for dma structures */
- struct iavf_dma_mem desc_buf; /* descriptor ring memory */
- struct iavf_virt_mem cmd_buf; /* command buffer memory */
-
- union {
- struct iavf_dma_mem *asq_bi;
- struct iavf_dma_mem *arq_bi;
- } r;
-
- u16 count; /* Number of descriptors */
- u16 rx_buf_len; /* Admin Receive Queue buffer length */
-
- /* used for interrupt processing */
- u16 next_to_use;
- u16 next_to_clean;
-
- /* used for queue tracking */
- u32 head;
- u32 tail;
- u32 len;
- u32 bah;
- u32 bal;
-};
-
-/* ASQ transaction details */
-struct i40e_asq_cmd_details {
- void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
- u64 cookie;
- u16 flags_ena;
- u16 flags_dis;
- bool async;
- bool postpone;
- struct i40e_aq_desc *wb_desc;
-};
-
-#define I40E_ADMINQ_DETAILS(R, i) \
- (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
-
-/* ARQ event information */
-struct i40e_arq_event_info {
- struct i40e_aq_desc desc;
- u16 msg_len;
- u16 buf_len;
- u8 *msg_buf;
-};
-
-/* Admin Queue information */
-struct iavf_adminq_info {
- struct iavf_adminq_ring arq; /* receive queue */
- struct iavf_adminq_ring asq; /* send queue */
- u32 asq_cmd_timeout; /* send queue cmd write back timeout*/
- u16 num_arq_entries; /* receive queue depth */
- u16 num_asq_entries; /* send queue depth */
- u16 arq_buf_size; /* receive queue buffer size */
- u16 asq_buf_size; /* send queue buffer size */
- u16 fw_maj_ver; /* firmware major version */
- u16 fw_min_ver; /* firmware minor version */
- u32 fw_build; /* firmware build number */
- u16 api_maj_ver; /* api major version */
- u16 api_min_ver; /* api minor version */
-
- struct mutex asq_mutex; /* Send queue lock */
- struct mutex arq_mutex; /* Receive queue lock */
-
- /* last status values on send and receive queues */
- enum i40e_admin_queue_err asq_last_status;
- enum i40e_admin_queue_err arq_last_status;
-};
-
-/**
- * i40e_aq_rc_to_posix - convert errors to user-land codes
- * aq_ret: AdminQ handler error code can override aq_rc
- * aq_rc: AdminQ firmware error code to convert
- **/
-static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
-{
- int aq_to_posix[] = {
- 0, /* I40E_AQ_RC_OK */
- -EPERM, /* I40E_AQ_RC_EPERM */
- -ENOENT, /* I40E_AQ_RC_ENOENT */
- -ESRCH, /* I40E_AQ_RC_ESRCH */
- -EINTR, /* I40E_AQ_RC_EINTR */
- -EIO, /* I40E_AQ_RC_EIO */
- -ENXIO, /* I40E_AQ_RC_ENXIO */
- -E2BIG, /* I40E_AQ_RC_E2BIG */
- -EAGAIN, /* I40E_AQ_RC_EAGAIN */
- -ENOMEM, /* I40E_AQ_RC_ENOMEM */
- -EACCES, /* I40E_AQ_RC_EACCES */
- -EFAULT, /* I40E_AQ_RC_EFAULT */
- -EBUSY, /* I40E_AQ_RC_EBUSY */
- -EEXIST, /* I40E_AQ_RC_EEXIST */
- -EINVAL, /* I40E_AQ_RC_EINVAL */
- -ENOTTY, /* I40E_AQ_RC_ENOTTY */
- -ENOSPC, /* I40E_AQ_RC_ENOSPC */
- -ENOSYS, /* I40E_AQ_RC_ENOSYS */
- -ERANGE, /* I40E_AQ_RC_ERANGE */
- -EPIPE, /* I40E_AQ_RC_EFLUSHED */
- -ESPIPE, /* I40E_AQ_RC_BAD_ADDR */
- -EROFS, /* I40E_AQ_RC_EMODE */
- -EFBIG, /* I40E_AQ_RC_EFBIG */
- };
-
- /* aq_rc is invalid if AQ timed out */
- if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
- return -EAGAIN;
-
- if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0]))))
- return -ERANGE;
-
- return aq_to_posix[aq_rc];
-}
-
-/* general information */
-#define I40E_AQ_LARGE_BUF 512
-#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
-
-void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode);
-
-#endif /* _IAVF_ADMINQ_H_ */
--- a/drivers/net/ethernet/intel/iavf/i40e_adminq_cmd.h
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright(c) 2013 - 2018 Intel Corporation. */
-
-#ifndef _I40E_ADMINQ_CMD_H_
-#define _I40E_ADMINQ_CMD_H_
-
-/* This header file defines the i40e Admin Queue commands and is shared between
- * i40e Firmware and Software. Do not change the names in this file to IAVF
- * because this file should be diff-able against the i40e version, even
- * though many parts have been removed in this VF version.
- *
- * This file needs to comply with the Linux Kernel coding style.
- */
-
-#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0005
-#define I40E_FW_API_VERSION_MINOR_X710 0x0008
-
-#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
- I40E_FW_API_VERSION_MINOR_X710 : \
- I40E_FW_API_VERSION_MINOR_X722)
-
-/* API version 1.7 implements additional link and PHY-specific APIs */
-#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
-
-struct i40e_aq_desc {
- __le16 flags;
- __le16 opcode;
- __le16 datalen;
- __le16 retval;
- __le32 cookie_high;
- __le32 cookie_low;
- union {
- struct {
- __le32 param0;
- __le32 param1;
- __le32 param2;
- __le32 param3;
- } internal;
- struct {
- __le32 param0;
- __le32 param1;
- __le32 addr_high;
- __le32 addr_low;
- } external;
- u8 raw[16];
- } params;
-};
-
-/* Flags sub-structure
- * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
- * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
- */
-
-/* command flags and offsets*/
-#define I40E_AQ_FLAG_DD_SHIFT 0
-#define I40E_AQ_FLAG_CMP_SHIFT 1
-#define I40E_AQ_FLAG_ERR_SHIFT 2
-#define I40E_AQ_FLAG_VFE_SHIFT 3
-#define I40E_AQ_FLAG_LB_SHIFT 9
-#define I40E_AQ_FLAG_RD_SHIFT 10
-#define I40E_AQ_FLAG_VFC_SHIFT 11
-#define I40E_AQ_FLAG_BUF_SHIFT 12
-#define I40E_AQ_FLAG_SI_SHIFT 13
-#define I40E_AQ_FLAG_EI_SHIFT 14
-#define I40E_AQ_FLAG_FE_SHIFT 15
-
-#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
-#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
-#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
-#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
-#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
-#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
-
-/* error codes */
-enum i40e_admin_queue_err {
- I40E_AQ_RC_OK = 0, /* success */
- I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
- I40E_AQ_RC_ENOENT = 2, /* No such element */
- I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
- I40E_AQ_RC_EINTR = 4, /* operation interrupted */
- I40E_AQ_RC_EIO = 5, /* I/O error */
- I40E_AQ_RC_ENXIO = 6, /* No such resource */
- I40E_AQ_RC_E2BIG = 7, /* Arg too long */
- I40E_AQ_RC_EAGAIN = 8, /* Try again */
- I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
- I40E_AQ_RC_EACCES = 10, /* Permission denied */
- I40E_AQ_RC_EFAULT = 11, /* Bad address */
- I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
- I40E_AQ_RC_EEXIST = 13, /* object already exists */
- I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
- I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
- I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
- I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
- I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
- I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
- I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
- I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
- I40E_AQ_RC_EFBIG = 22, /* File too large */
-};
-
-/* Admin Queue command opcodes */
-enum i40e_admin_queue_opc {
- /* aq commands */
- i40e_aqc_opc_get_version = 0x0001,
- i40e_aqc_opc_driver_version = 0x0002,
- i40e_aqc_opc_queue_shutdown = 0x0003,
- i40e_aqc_opc_set_pf_context = 0x0004,
-
- /* resource ownership */
- i40e_aqc_opc_request_resource = 0x0008,
- i40e_aqc_opc_release_resource = 0x0009,
-
- i40e_aqc_opc_list_func_capabilities = 0x000A,
- i40e_aqc_opc_list_dev_capabilities = 0x000B,
-
- /* Proxy commands */
- i40e_aqc_opc_set_proxy_config = 0x0104,
- i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
-
- /* LAA */
- i40e_aqc_opc_mac_address_read = 0x0107,
- i40e_aqc_opc_mac_address_write = 0x0108,
-
- /* PXE */
- i40e_aqc_opc_clear_pxe_mode = 0x0110,
-
- /* WoL commands */
- i40e_aqc_opc_set_wol_filter = 0x0120,
- i40e_aqc_opc_get_wake_reason = 0x0121,
-
- /* internal switch commands */
- i40e_aqc_opc_get_switch_config = 0x0200,
- i40e_aqc_opc_add_statistics = 0x0201,
- i40e_aqc_opc_remove_statistics = 0x0202,
- i40e_aqc_opc_set_port_parameters = 0x0203,
- i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
- i40e_aqc_opc_set_switch_config = 0x0205,
- i40e_aqc_opc_rx_ctl_reg_read = 0x0206,
- i40e_aqc_opc_rx_ctl_reg_write = 0x0207,
-
- i40e_aqc_opc_add_vsi = 0x0210,
- i40e_aqc_opc_update_vsi_parameters = 0x0211,
- i40e_aqc_opc_get_vsi_parameters = 0x0212,
-
- i40e_aqc_opc_add_pv = 0x0220,
- i40e_aqc_opc_update_pv_parameters = 0x0221,
- i40e_aqc_opc_get_pv_parameters = 0x0222,
-
- i40e_aqc_opc_add_veb = 0x0230,
- i40e_aqc_opc_update_veb_parameters = 0x0231,
- i40e_aqc_opc_get_veb_parameters = 0x0232,
-
- i40e_aqc_opc_delete_element = 0x0243,
-
- i40e_aqc_opc_add_macvlan = 0x0250,
- i40e_aqc_opc_remove_macvlan = 0x0251,
- i40e_aqc_opc_add_vlan = 0x0252,
- i40e_aqc_opc_remove_vlan = 0x0253,
- i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
- i40e_aqc_opc_add_tag = 0x0255,
- i40e_aqc_opc_remove_tag = 0x0256,
- i40e_aqc_opc_add_multicast_etag = 0x0257,
- i40e_aqc_opc_remove_multicast_etag = 0x0258,
- i40e_aqc_opc_update_tag = 0x0259,
- i40e_aqc_opc_add_control_packet_filter = 0x025A,
- i40e_aqc_opc_remove_control_packet_filter = 0x025B,
- i40e_aqc_opc_add_cloud_filters = 0x025C,
- i40e_aqc_opc_remove_cloud_filters = 0x025D,
- i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
-
- i40e_aqc_opc_add_mirror_rule = 0x0260,
- i40e_aqc_opc_delete_mirror_rule = 0x0261,
-
- /* Dynamic Device Personalization */
- i40e_aqc_opc_write_personalization_profile = 0x0270,
- i40e_aqc_opc_get_personalization_profile_list = 0x0271,
-
- /* DCB commands */
- i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
- i40e_aqc_opc_dcb_updated = 0x0302,
- i40e_aqc_opc_set_dcb_parameters = 0x0303,
-
- /* TX scheduler */
- i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
- i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
- i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
- i40e_aqc_opc_query_vsi_bw_config = 0x0408,
- i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
- i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
-
- i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
- i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
- i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
- i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
- i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
- i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
- i40e_aqc_opc_query_port_ets_config = 0x0419,
- i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
- i40e_aqc_opc_suspend_port_tx = 0x041B,
- i40e_aqc_opc_resume_port_tx = 0x041C,
- i40e_aqc_opc_configure_partition_bw = 0x041D,
- /* hmc */
- i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
- i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
-
- /* phy commands*/
- i40e_aqc_opc_get_phy_abilities = 0x0600,
- i40e_aqc_opc_set_phy_config = 0x0601,
- i40e_aqc_opc_set_mac_config = 0x0603,
- i40e_aqc_opc_set_link_restart_an = 0x0605,
- i40e_aqc_opc_get_link_status = 0x0607,
- i40e_aqc_opc_set_phy_int_mask = 0x0613,
- i40e_aqc_opc_get_local_advt_reg = 0x0614,
- i40e_aqc_opc_set_local_advt_reg = 0x0615,
- i40e_aqc_opc_get_partner_advt = 0x0616,
- i40e_aqc_opc_set_lb_modes = 0x0618,
- i40e_aqc_opc_get_phy_wol_caps = 0x0621,
- i40e_aqc_opc_set_phy_debug = 0x0622,
- i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
- i40e_aqc_opc_run_phy_activity = 0x0626,
- i40e_aqc_opc_set_phy_register = 0x0628,
- i40e_aqc_opc_get_phy_register = 0x0629,
-
- /* NVM commands */
- i40e_aqc_opc_nvm_read = 0x0701,
- i40e_aqc_opc_nvm_erase = 0x0702,
- i40e_aqc_opc_nvm_update = 0x0703,
- i40e_aqc_opc_nvm_config_read = 0x0704,
- i40e_aqc_opc_nvm_config_write = 0x0705,
- i40e_aqc_opc_oem_post_update = 0x0720,
- i40e_aqc_opc_thermal_sensor = 0x0721,
-
- /* virtualization commands */
- i40e_aqc_opc_send_msg_to_pf = 0x0801,
- i40e_aqc_opc_send_msg_to_vf = 0x0802,
- i40e_aqc_opc_send_msg_to_peer = 0x0803,
-
- /* alternate structure */
- i40e_aqc_opc_alternate_write = 0x0900,
- i40e_aqc_opc_alternate_write_indirect = 0x0901,
- i40e_aqc_opc_alternate_read = 0x0902,
- i40e_aqc_opc_alternate_read_indirect = 0x0903,
- i40e_aqc_opc_alternate_write_done = 0x0904,
- i40e_aqc_opc_alternate_set_mode = 0x0905,
- i40e_aqc_opc_alternate_clear_port = 0x0906,
-
- /* LLDP commands */
- i40e_aqc_opc_lldp_get_mib = 0x0A00,
- i40e_aqc_opc_lldp_update_mib = 0x0A01,
- i40e_aqc_opc_lldp_add_tlv = 0x0A02,
- i40e_aqc_opc_lldp_update_tlv = 0x0A03,
- i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
- i40e_aqc_opc_lldp_stop = 0x0A05,
- i40e_aqc_opc_lldp_start = 0x0A06,
-
- /* Tunnel commands */
- i40e_aqc_opc_add_udp_tunnel = 0x0B00,
- i40e_aqc_opc_del_udp_tunnel = 0x0B01,
- i40e_aqc_opc_set_rss_key = 0x0B02,
- i40e_aqc_opc_set_rss_lut = 0x0B03,
- i40e_aqc_opc_get_rss_key = 0x0B04,
- i40e_aqc_opc_get_rss_lut = 0x0B05,
-
- /* Async Events */
- i40e_aqc_opc_event_lan_overflow = 0x1001,
-
- /* OEM commands */
- i40e_aqc_opc_oem_parameter_change = 0xFE00,
- i40e_aqc_opc_oem_device_status_change = 0xFE01,
- i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
- i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
-
- /* debug commands */
- i40e_aqc_opc_debug_read_reg = 0xFF03,
- i40e_aqc_opc_debug_write_reg = 0xFF04,
- i40e_aqc_opc_debug_modify_reg = 0xFF07,
- i40e_aqc_opc_debug_dump_internals = 0xFF08,
-};
-
-/* command structures and indirect data structures */
-
-/* Structure naming conventions:
- * - no suffix for direct command descriptor structures
- * - _data for indirect sent data
- * - _resp for indirect return data (data which is both will use _data)
- * - _completion for direct return data
- * - _element_ for repeated elements (may also be _data or _resp)
- *
- * Command structures are expected to overlay the params.raw member of the basic
- * descriptor, and as such cannot exceed 16 bytes in length.
- */
-
-/* This macro is used to generate a compilation error if a structure
- * is not exactly the correct length. It gives a divide by zero error if the
- * structure is not of the correct size, otherwise it creates an enum that is
- * never used.
- */
-#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
- { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
-
-/* This macro is used extensively to ensure that command structures are 16
- * bytes in length as they have to map to the raw array of that size.
- */
-#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
-
-/* Queue Shutdown (direct 0x0003) */
-struct i40e_aqc_queue_shutdown {
- __le32 driver_unloading;
-#define I40E_AQ_DRIVER_UNLOADING 0x1
- u8 reserved[12];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
-
-struct i40e_aqc_vsi_properties_data {
- /* first 96 byte are written by SW */
- __le16 valid_sections;
-#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
-#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
-#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
-#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
-#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
-#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
-#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
-#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
-#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
-#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
- /* switch section */
- __le16 switch_id; /* 12bit id combined with flags below */
-#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
-#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
-#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
-#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
-#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
- u8 sw_reserved[2];
- /* security section */
- u8 sec_flags;
-#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
-#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
- u8 sec_reserved;
- /* VLAN section */
- __le16 pvid; /* VLANS include priority bits */
- __le16 fcoe_pvid;
- u8 port_vlan_flags;
-#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
-#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
- I40E_AQ_VSI_PVLAN_MODE_SHIFT)
-#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
-#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
-#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
-#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
-#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
-#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
- I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
-#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
-#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
-#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
- u8 pvlan_reserved[3];
- /* ingress egress up sections */
- __le32 ingress_table; /* bitmap, 3 bits per up */
-#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
-#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
-#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
-#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
-#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
-#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
-#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
-#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
-#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
-#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
- I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
- __le32 egress_table; /* same defines as for ingress table */
- /* cascaded PV section */
- __le16 cas_pv_tag;
- u8 cas_pv_flags;
-#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
- I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
-#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
-#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
-#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
-#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
-#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
-#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
- u8 cas_pv_reserved;
- /* queue mapping section */
- __le16 mapping_flags;
-#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
-#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
- __le16 queue_mapping[16];
-#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
-#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
- __le16 tc_mapping[8];
-#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
-#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
- I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
-#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
-#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
- I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
- /* queueing option section */
- u8 queueing_opt_flags;
-#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
-#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
-#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
-#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
-#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
-#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
- u8 queueing_opt_reserved[3];
- /* scheduler section */
- u8 up_enable_bits;
- u8 sched_reserved;
- /* outer up section */
- __le32 outer_up_table; /* same structure and defines as ingress tbl */
- u8 cmd_reserved[8];
- /* last 32 bytes are written by FW */
- __le16 qs_handle[8];
-#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
- __le16 stat_counter_idx;
- __le16 sched_id;
- u8 resp_reserved[12];
-};
-
-I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
-
-/* Get VEB Parameters (direct 0x0232)
- * uses i40e_aqc_switch_seid for the descriptor
- */
-struct i40e_aqc_get_veb_parameters_completion {
- __le16 seid;
- __le16 switch_id;
- __le16 veb_flags; /* only the first/last flags from 0x0230 is valid */
- __le16 statistic_index;
- __le16 vebs_used;
- __le16 vebs_free;
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
-
-#define I40E_LINK_SPEED_100MB_SHIFT 0x1
-#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
-#define I40E_LINK_SPEED_10GB_SHIFT 0x3
-#define I40E_LINK_SPEED_40GB_SHIFT 0x4
-#define I40E_LINK_SPEED_20GB_SHIFT 0x5
-#define I40E_LINK_SPEED_25GB_SHIFT 0x6
-
-enum i40e_aq_link_speed {
- I40E_LINK_SPEED_UNKNOWN = 0,
- I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
- I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
- I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
- I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT),
- I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT),
-};
-
-/* Send to PF command (indirect 0x0801) id is only used by PF
- * Send to VF command (indirect 0x0802) id is only used by PF
- * Send to Peer PF command (indirect 0x0803)
- */
-struct i40e_aqc_pf_vf_message {
- __le32 id;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
-
-struct i40e_aqc_get_set_rss_key {
-#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
-#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
-#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
- I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
- __le16 vsi_id;
- u8 reserved[6];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
-
-struct i40e_aqc_get_set_rss_key_data {
- u8 standard_rss_key[0x28];
- u8 extended_hash_key[0xc];
-};
-
-I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
-
-struct i40e_aqc_get_set_rss_lut {
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
-#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
-#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
- I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
- __le16 vsi_id;
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \
- BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
-
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
- __le16 flags;
- u8 reserved[4];
- __le32 addr_high;
- __le32 addr_low;
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
-#endif /* _I40E_ADMINQ_CMD_H_ */
--- /dev/null
+++ b/drivers/net/ethernet/intel/iavf/iavf_adminq.c
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#include "iavf_status.h"
+#include "iavf_type.h"
+#include "iavf_register.h"
+#include "iavf_adminq.h"
+#include "iavf_prototype.h"
+
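+/* Typical bring-up, as a sketch: before iavf_init_adminq() is called, the
+ * main driver fills in the four sizing fields, roughly
+ *
+ *	hw->aq.num_arq_entries = IAVF_AQ_LEN;
+ *	hw->aq.num_asq_entries = IAVF_AQ_LEN;
+ *	hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
+ *	hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
+ *	status = iavf_init_adminq(hw);
+ *
+ * The constant names above follow the main driver and are illustrative
+ * only; iavf_init_adminq() itself just requires all four fields to be
+ * non-zero.
+ */
+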
+/**
+ * i40e_adminq_init_regs - Initialize AdminQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Caches the AdminQ register offsets in the hw struct; called from
+ * iavf_init_adminq() before the queues themselves are allocated
+ **/
+static void i40e_adminq_init_regs(struct iavf_hw *hw)
+{
+ /* set head and tail registers in our local struct */
+ hw->aq.asq.tail = IAVF_VF_ATQT1;
+ hw->aq.asq.head = IAVF_VF_ATQH1;
+ hw->aq.asq.len = IAVF_VF_ATQLEN1;
+ hw->aq.asq.bal = IAVF_VF_ATQBAL1;
+ hw->aq.asq.bah = IAVF_VF_ATQBAH1;
+ hw->aq.arq.tail = IAVF_VF_ARQT1;
+ hw->aq.arq.head = IAVF_VF_ARQH1;
+ hw->aq.arq.len = IAVF_VF_ARQLEN1;
+ hw->aq.arq.bal = IAVF_VF_ARQBAL1;
+ hw->aq.arq.bah = IAVF_VF_ARQBAH1;
+}
+
+/**
+ * i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ **/
+static iavf_status i40e_alloc_adminq_asq_ring(struct iavf_hw *hw)
+{
+ iavf_status ret_code;
+
+ ret_code = iavf_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
+ i40e_mem_atq_ring,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_aq_desc)),
+ IAVF_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ return ret_code;
+
+ ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
+ (hw->aq.num_asq_entries *
+ sizeof(struct i40e_asq_cmd_details)));
+ if (ret_code) {
+ iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+ return ret_code;
+ }
+
+ return ret_code;
+}
+
+/**
+ * i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ **/
+static iavf_status i40e_alloc_adminq_arq_ring(struct iavf_hw *hw)
+{
+ iavf_status ret_code;
+
+ ret_code = iavf_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
+ i40e_mem_arq_ring,
+ (hw->aq.num_arq_entries *
+ sizeof(struct i40e_aq_desc)),
+ IAVF_ADMINQ_DESC_ALIGNMENT);
+
+ return ret_code;
+}
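+
+/* The asymmetry with i40e_alloc_adminq_asq_ring() above is intentional:
+ * the ARQ carries firmware-originated events, so there is no per-command
+ * i40e_asq_cmd_details array to allocate alongside the descriptor ring.
+ */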
+
+/**
+ * i40e_free_adminq_asq - Free Admin Queue send rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted send buffers have already been cleaned
+ * and de-allocated
+ **/
+static void i40e_free_adminq_asq(struct iavf_hw *hw)
+{
+ iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+}
+
+/**
+ * i40e_free_adminq_arq - Free Admin Queue receive rings
+ * @hw: pointer to the hardware structure
+ *
+ * This assumes the posted receive buffers have already been cleaned
+ * and de-allocated
+ **/
+static void i40e_free_adminq_arq(struct iavf_hw *hw)
+{
+ iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+}
+
+/**
+ * i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
+ * @hw: pointer to the hardware structure
+ **/
+static iavf_status i40e_alloc_arq_bufs(struct iavf_hw *hw)
+{
+ struct i40e_aq_desc *desc;
+ struct iavf_dma_mem *bi;
+ iavf_status ret_code;
+ int i;
+
+ /* We'll be allocating the buffer info memory first, then we can
+ * allocate the mapped buffers for the event processing
+ */
+
+ /* buffer_info structures do not need alignment */
+ ret_code = iavf_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
+ (hw->aq.num_arq_entries *
+ sizeof(struct iavf_dma_mem)));
+ if (ret_code)
+ goto alloc_arq_bufs;
+ hw->aq.arq.r.arq_bi = (struct iavf_dma_mem *)hw->aq.arq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_arq_entries; i++) {
+ bi = &hw->aq.arq.r.arq_bi[i];
+ ret_code = iavf_allocate_dma_mem(hw, bi,
+ i40e_mem_arq_buf,
+ hw->aq.arq_buf_size,
+ IAVF_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_arq_bufs;
+
+ /* now configure the descriptors for use */
+ desc = IAVF_ADMINQ_DESC(hw->aq.arq, i);
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->opcode = 0;
+ /* This is in accordance with the Admin queue design; there is no
+ * register for buffer size configuration, so the size travels in the
+ * descriptor's datalen field
+ */
+ desc->datalen = cpu_to_le16((u16)bi->size);
+ desc->retval = 0;
+ desc->cookie_high = 0;
+ desc->cookie_low = 0;
+ desc->params.external.addr_high =
+ cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.external.addr_low =
+ cpu_to_le32(lower_32_bits(bi->pa));
+ desc->params.external.param0 = 0;
+ desc->params.external.param1 = 0;
+ }
+
+alloc_arq_bufs:
+ return ret_code;
+
+unwind_alloc_arq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
+
+ return ret_code;
+}
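+
+/* Every ARQ slot is armed with a DMA buffer up front. The firmware writes
+ * each event into the posted buffer, and iavf_clean_arq_element() copies
+ * the event out and re-posts the same buffer, so no allocation ever
+ * happens in the event-processing path.
+ */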
+
+/**
+ * i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
+ * @hw: pointer to the hardware structure
+ **/
+static iavf_status i40e_alloc_asq_bufs(struct iavf_hw *hw)
+{
+ struct iavf_dma_mem *bi;
+ iavf_status ret_code;
+ int i;
+
+ /* No mapped memory needed yet, just the buffer info structures */
+ ret_code = iavf_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
+ (hw->aq.num_asq_entries *
+ sizeof(struct iavf_dma_mem)));
+ if (ret_code)
+ goto alloc_asq_bufs;
+ hw->aq.asq.r.asq_bi = (struct iavf_dma_mem *)hw->aq.asq.dma_head.va;
+
+ /* allocate the mapped buffers */
+ for (i = 0; i < hw->aq.num_asq_entries; i++) {
+ bi = &hw->aq.asq.r.asq_bi[i];
+ ret_code = iavf_allocate_dma_mem(hw, bi,
+ i40e_mem_asq_buf,
+ hw->aq.asq_buf_size,
+ IAVF_ADMINQ_DESC_ALIGNMENT);
+ if (ret_code)
+ goto unwind_alloc_asq_bufs;
+ }
+alloc_asq_bufs:
+ return ret_code;
+
+unwind_alloc_asq_bufs:
+ /* don't try to free the one that failed... */
+ i--;
+ for (; i >= 0; i--)
+ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+
+ return ret_code;
+}
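+
+/* Note the contrast with i40e_alloc_arq_bufs() above: ASQ buffers are
+ * allocated eagerly here, but the descriptors stay empty and are filled
+ * in per command by iavf_asq_send_command().
+ */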
+
+/**
+ * i40e_free_arq_bufs - Free receive queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_arq_bufs(struct iavf_hw *hw)
+{
+ int i;
+
+ /* free descriptors */
+ for (i = 0; i < hw->aq.num_arq_entries; i++)
+ iavf_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
+
+ /* free the descriptor memory */
+ iavf_free_dma_mem(hw, &hw->aq.arq.desc_buf);
+
+ /* free the dma header */
+ iavf_free_virt_mem(hw, &hw->aq.arq.dma_head);
+}
+
+/**
+ * i40e_free_asq_bufs - Free send queue buffer info elements
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_free_asq_bufs(struct iavf_hw *hw)
+{
+ int i;
+
+ /* only unmap if the address is non-NULL */
+ for (i = 0; i < hw->aq.num_asq_entries; i++)
+ if (hw->aq.asq.r.asq_bi[i].pa)
+ iavf_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
+
+ /* free the buffer info list */
+ iavf_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
+
+ /* free the descriptor memory */
+ iavf_free_dma_mem(hw, &hw->aq.asq.desc_buf);
+
+ /* free the dma header */
+ iavf_free_virt_mem(hw, &hw->aq.asq.dma_head);
+}
+
+/**
+ * i40e_config_asq_regs - configure ASQ registers
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the transmit queue
+ **/
+static iavf_status i40e_config_asq_regs(struct iavf_hw *hw)
+{
+ iavf_status ret_code = 0;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+
+ /* set starting point */
+ wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+ IAVF_VF_ATQLEN1_ATQENABLE_MASK));
+ wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
+ wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.asq.bal);
+ if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * i40e_config_arq_regs - ARQ register configuration
+ * @hw: pointer to the hardware structure
+ *
+ * Configure base address and length registers for the receive (event queue)
+ **/
+static iavf_status i40e_config_arq_regs(struct iavf_hw *hw)
+{
+ iavf_status ret_code = 0;
+ u32 reg = 0;
+
+ /* Clear Head and Tail */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+
+ /* set starting point */
+ wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+ IAVF_VF_ARQLEN1_ARQENABLE_MASK));
+ wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
+ wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
+
+ /* Update tail in the HW to post pre-allocated buffers */
+ wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+ /* Check one register to verify that config was applied */
+ reg = rd32(hw, hw->aq.arq.bal);
+ if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
+}
+
+/**
+ * i40e_init_asq - main initialization routine for ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * This is the main initialization routine for the Admin Send Queue
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.asq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+static iavf_status i40e_init_asq(struct iavf_hw *hw)
+{
+ iavf_status ret_code = 0;
+
+ if (hw->aq.asq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_asq_entries == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.asq.next_to_use = 0;
+ hw->aq.asq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_asq_ring(hw);
+ if (ret_code)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_asq_bufs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = i40e_config_asq_regs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ hw->aq.asq.count = hw->aq.num_asq_entries;
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_asq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_init_arq - initialize ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main initialization routine for the Admin Receive (Event) Queue.
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ *
+ * Do *NOT* hold the lock when calling this as the memory allocation routines
+ * called are not going to be atomic context safe
+ **/
+static iavf_status i40e_init_arq(struct iavf_hw *hw)
+{
+ iavf_status ret_code = 0;
+
+ if (hw->aq.arq.count > 0) {
+ /* queue already initialized */
+ ret_code = I40E_ERR_NOT_READY;
+ goto init_adminq_exit;
+ }
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ hw->aq.arq.next_to_use = 0;
+ hw->aq.arq.next_to_clean = 0;
+
+ /* allocate the ring memory */
+ ret_code = i40e_alloc_adminq_arq_ring(hw);
+ if (ret_code)
+ goto init_adminq_exit;
+
+ /* allocate buffers in the rings */
+ ret_code = i40e_alloc_arq_bufs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* initialize base registers */
+ ret_code = i40e_config_arq_regs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
+
+ /* success! */
+ hw->aq.arq.count = hw->aq.num_arq_entries;
+ goto init_adminq_exit;
+
+init_adminq_free_rings:
+ i40e_free_adminq_arq(hw);
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_asq - shutdown the ASQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Send Queue
+ **/
+static iavf_status i40e_shutdown_asq(struct iavf_hw *hw)
+{
+ iavf_status ret_code = 0;
+
+ mutex_lock(&hw->aq.asq_mutex);
+
+ if (hw->aq.asq.count == 0) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto shutdown_asq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.asq.head, 0);
+ wr32(hw, hw->aq.asq.tail, 0);
+ wr32(hw, hw->aq.asq.len, 0);
+ wr32(hw, hw->aq.asq.bal, 0);
+ wr32(hw, hw->aq.asq.bah, 0);
+
+ hw->aq.asq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_asq_bufs(hw);
+
+shutdown_asq_out:
+ mutex_unlock(&hw->aq.asq_mutex);
+ return ret_code;
+}
+
+/**
+ * i40e_shutdown_arq - shutdown ARQ
+ * @hw: pointer to the hardware structure
+ *
+ * The main shutdown routine for the Admin Receive Queue
+ **/
+static iavf_status i40e_shutdown_arq(struct iavf_hw *hw)
+{
+ iavf_status ret_code = 0;
+
+ mutex_lock(&hw->aq.arq_mutex);
+
+ if (hw->aq.arq.count == 0) {
+ ret_code = I40E_ERR_NOT_READY;
+ goto shutdown_arq_out;
+ }
+
+ /* Stop firmware AdminQ processing */
+ wr32(hw, hw->aq.arq.head, 0);
+ wr32(hw, hw->aq.arq.tail, 0);
+ wr32(hw, hw->aq.arq.len, 0);
+ wr32(hw, hw->aq.arq.bal, 0);
+ wr32(hw, hw->aq.arq.bah, 0);
+
+ hw->aq.arq.count = 0; /* to indicate uninitialized queue */
+
+ /* free ring buffers */
+ i40e_free_arq_bufs(hw);
+
+shutdown_arq_out:
+ mutex_unlock(&hw->aq.arq_mutex);
+ return ret_code;
+}
+
+/**
+ * iavf_init_adminq - main initialization routine for Admin Queue
+ * @hw: pointer to the hardware structure
+ *
+ * Prior to calling this function, drivers *MUST* set the following fields
+ * in the hw->aq structure:
+ * - hw->aq.num_asq_entries
+ * - hw->aq.num_arq_entries
+ * - hw->aq.arq_buf_size
+ * - hw->aq.asq_buf_size
+ **/
+iavf_status iavf_init_adminq(struct iavf_hw *hw)
+{
+ iavf_status ret_code;
+
+ /* verify input for valid configuration */
+ if ((hw->aq.num_arq_entries == 0) ||
+ (hw->aq.num_asq_entries == 0) ||
+ (hw->aq.arq_buf_size == 0) ||
+ (hw->aq.asq_buf_size == 0)) {
+ ret_code = I40E_ERR_CONFIG;
+ goto init_adminq_exit;
+ }
+
+ /* Set up register offsets */
+ i40e_adminq_init_regs(hw);
+
+ /* setup ASQ command write back timeout */
+ hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
+
+ /* allocate the ASQ */
+ ret_code = i40e_init_asq(hw);
+ if (ret_code)
+ goto init_adminq_destroy_locks;
+
+ /* allocate the ARQ */
+ ret_code = i40e_init_arq(hw);
+ if (ret_code)
+ goto init_adminq_free_asq;
+
+ /* success! */
+ goto init_adminq_exit;
+
+init_adminq_free_asq:
+ i40e_shutdown_asq(hw);
+init_adminq_destroy_locks:
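+ /* the asq/arq mutexes are initialized and destroyed elsewhere in
+ * the driver, so there are no locks to unwind here; the label is
+ * likely kept for diff parity with the i40e unwind path
+ */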
+
+init_adminq_exit:
+ return ret_code;
+}
+
+/**
+ * iavf_shutdown_adminq - shutdown routine for the Admin Queue
+ * @hw: pointer to the hardware structure
+ **/
+iavf_status iavf_shutdown_adminq(struct iavf_hw *hw)
+{
+ iavf_status ret_code = 0;
+
+ if (iavf_check_asq_alive(hw))
+ iavf_aq_queue_shutdown(hw, true);
+
+ i40e_shutdown_asq(hw);
+ i40e_shutdown_arq(hw);
+
+ return ret_code;
+}
+
+/**
+ * i40e_clean_asq - cleans Admin send queue
+ * @hw: pointer to the hardware structure
+ *
+ * Returns the number of free descriptors.
+ **/
+static u16 i40e_clean_asq(struct iavf_hw *hw)
+{
+ struct iavf_adminq_ring *asq = &hw->aq.asq;
+ struct i40e_asq_cmd_details *details;
+ u16 ntc = asq->next_to_clean;
+ struct i40e_aq_desc desc_cb;
+ struct i40e_aq_desc *desc;
+
+ desc = IAVF_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
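+ /* walk the ring from next_to_clean up to the position reported by
+ * the hardware head register; every descriptor in that range has
+ * already been processed by firmware
+ */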
+ while (rd32(hw, hw->aq.asq.head) != ntc) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
+
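+ /* invoke any completion callback with a stack copy of the
+ * descriptor, since the ring entry is zeroed just below
+ */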
+ if (details->callback) {
+ I40E_ADMINQ_CALLBACK cb_func =
+ (I40E_ADMINQ_CALLBACK)details->callback;
+ desc_cb = *desc;
+ cb_func(hw, &desc_cb);
+ }
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ memset((void *)details, 0,
+ sizeof(struct i40e_asq_cmd_details));
+ ntc++;
+ if (ntc == asq->count)
+ ntc = 0;
+ desc = IAVF_ADMINQ_DESC(*asq, ntc);
+ details = I40E_ADMINQ_DETAILS(*asq, ntc);
+ }
+
+ asq->next_to_clean = ntc;
+
+ return IAVF_DESC_UNUSED(asq);
+}
+
+/**
+ * iavf_asq_done - check if FW has processed the Admin Send Queue
+ * @hw: pointer to the hw struct
+ *
+ * Returns true if the firmware has processed all descriptors on the
+ * admin send queue. Returns false if there are still requests pending.
+ **/
+bool iavf_asq_done(struct iavf_hw *hw)
+{
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ return rd32(hw, hw->aq.asq.head) == hw->aq.asq.next_to_use;
+}
+
+/**
+ * iavf_asq_send_command - send command to Admin Queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor describing the command (non DMA mem)
+ * @buff: buffer to use for indirect commands
+ * @buff_size: size of buffer for indirect commands
+ * @cmd_details: pointer to command details structure
+ *
+ * This is the main send command driver routine for the Admin Queue send
+ * queue. It posts the command, waits for firmware writeback unless the
+ * caller requested asynchronous completion, and reclaims descriptors
+ * already processed by firmware.
+ **/
+iavf_status iavf_asq_send_command(struct iavf_hw *hw, struct i40e_aq_desc *desc,
+ void *buff, /* can be NULL */
+ u16 buff_size,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct iavf_dma_mem *dma_buff = NULL;
+ struct i40e_asq_cmd_details *details;
+ struct i40e_aq_desc *desc_on_ring;
+ bool cmd_completed = false;
+ iavf_status status = 0;
+ u16 retval = 0;
+ u32 val = 0;
+
+ mutex_lock(&hw->aq.asq_mutex);
+
+ if (hw->aq.asq.count == 0) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Admin queue not initialized.\n");
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_error;
+ }
+
+ hw->aq.asq_last_status = I40E_AQ_RC_OK;
+
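+ /* sanity check: a head value beyond the ring size means the
+ * register contents cannot be trusted (e.g. the device may be
+ * mid-reset), so bail out instead of indexing past the ring
+ */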
+ val = rd32(hw, hw->aq.asq.head);
+ if (val >= hw->aq.num_asq_entries) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: head overrun at %d\n", val);
+ status = I40E_ERR_QUEUE_EMPTY;
+ goto asq_send_command_error;
+ }
+
+ details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
+ if (cmd_details) {
+ *details = *cmd_details;
+
+ /* If the cmd_details are defined copy the cookie. The
+ * cpu_to_le32 is not needed here because the data is ignored
+ * by the FW, only used by the driver
+ */
+ if (details->cookie) {
+ desc->cookie_high =
+ cpu_to_le32(upper_32_bits(details->cookie));
+ desc->cookie_low =
+ cpu_to_le32(lower_32_bits(details->cookie));
+ }
+ } else {
+ memset(details, 0, sizeof(struct i40e_asq_cmd_details));
+ }
+
+ /* clear requested flags and then set additional flags if defined */
+ desc->flags &= ~cpu_to_le16(details->flags_dis);
+ desc->flags |= cpu_to_le16(details->flags_ena);
+
+ if (buff_size > hw->aq.asq_buf_size) {
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Invalid buffer size: %d.\n",
+ buff_size);
+ status = I40E_ERR_INVALID_SIZE;
+ goto asq_send_command_error;
+ }
+
+ if (details->postpone && !details->async) {
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Async flag not set along with postpone flag");
+ status = I40E_ERR_PARAM;
+ goto asq_send_command_error;
+ }
+
+ /* call clean and check queue available function to reclaim the
+ * descriptors that were processed by FW, the function returns the
+ * number of desc available
+ */
+ /* the clean function called here could be called in a separate thread
+ * in case of asynchronous completions
+ */
+ if (i40e_clean_asq(hw) == 0) {
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Error queue is full.\n");
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
+ goto asq_send_command_error;
+ }
+
+ /* initialize the temp desc pointer with the right desc */
+ desc_on_ring = IAVF_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);
+
+ /* if the desc is available copy the temp desc to the right place */
+ *desc_on_ring = *desc;
+
+ /* if buff is not NULL assume indirect command */
+ if (buff) {
+ dma_buff = &hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use];
+ /* copy the user buff into the respective DMA buff */
+ memcpy(dma_buff->va, buff, buff_size);
+ desc_on_ring->datalen = cpu_to_le16(buff_size);
+
+ /* Update the address values in the desc with the pa value
+ * for respective buffer
+ */
+ desc_on_ring->params.external.addr_high =
+ cpu_to_le32(upper_32_bits(dma_buff->pa));
+ desc_on_ring->params.external.addr_low =
+ cpu_to_le32(lower_32_bits(dma_buff->pa));
+ }
+
+ /* bump the tail */
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
+ iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
+ buff, buff_size);
+ (hw->aq.asq.next_to_use)++;
+ if (hw->aq.asq.next_to_use == hw->aq.asq.count)
+ hw->aq.asq.next_to_use = 0;
+ if (!details->postpone)
+ wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use);
+
+ /* if cmd_details are not defined or async flag is not set,
+ * we need to wait for desc write back
+ */
+ if (!details->async && !details->postpone) {
+ u32 total_delay = 0;
+
+ do {
+ /* AQ designers suggest use of head for better
+ * timing reliability than DD bit
+ */
+ if (iavf_asq_done(hw))
+ break;
+ udelay(50);
+ total_delay += 50;
+ } while (total_delay < hw->aq.asq_cmd_timeout);
+ }
+
+ /* if ready, copy the desc back to temp */
+ if (iavf_asq_done(hw)) {
+ *desc = *desc_on_ring;
+ if (buff)
+ memcpy(buff, dma_buff->va, buff_size);
+ retval = le16_to_cpu(desc->retval);
+ if (retval != 0) {
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Command completed with error 0x%X.\n",
+ retval);
+
+ /* strip off FW internal code */
+ retval &= 0xff;
+ }
+ cmd_completed = true;
+ if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
+ status = 0;
+ else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
+ status = I40E_ERR_NOT_READY;
+ else
+ status = I40E_ERR_ADMIN_QUEUE_ERROR;
+ hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
+ }
+
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: desc and buffer writeback:\n");
+ iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
+
+ /* save writeback aq if requested */
+ if (details->wb_desc)
+ *details->wb_desc = *desc_on_ring;
+
+ /* update the error if time out occurred */
+ if ((!cmd_completed) &&
+ (!details->async && !details->postpone)) {
+ if (rd32(hw, hw->aq.asq.len) & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: AQ Critical error.\n");
+ status = I40E_ERR_ADMIN_QUEUE_CRITICAL_ERROR;
+ } else {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQTX: Writeback timeout.\n");
+ status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
+ }
+ }
+
+asq_send_command_error:
+ mutex_unlock(&hw->aq.asq_mutex);
+ return status;
+}
+
+/**
+ * iavf_fill_default_direct_cmd_desc - AQ descriptor helper function
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Fill the desc with default values
+ **/
+void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode)
+{
+ /* zero out the desc */
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
+}
+
+/**
+ * iavf_clean_arq_element - clean one element from the ARQ
+ * @hw: pointer to the hw struct
+ * @e: event info from the receive descriptor, includes any buffers
+ * @pending: number of events that could be left to process
+ *
+ * This function cleans one Admin Receive Queue element and returns
+ * the contents through e. It can also return how many events are
+ * left to process through 'pending'
+ **/
+iavf_status iavf_clean_arq_element(struct iavf_hw *hw,
+ struct i40e_arq_event_info *e,
+ u16 *pending)
+{
+ u16 ntc = hw->aq.arq.next_to_clean;
+ struct i40e_aq_desc *desc;
+ iavf_status ret_code = 0;
+ struct iavf_dma_mem *bi;
+ u16 desc_idx;
+ u16 datalen;
+ u16 flags;
+ u16 ntu;
+
+ /* pre-clean the event info */
+ memset(&e->desc, 0, sizeof(e->desc));
+
+ /* take the lock before we start messing with the ring */
+ mutex_lock(&hw->aq.arq_mutex);
+
+ if (hw->aq.arq.count == 0) {
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE,
+ "AQRX: Admin queue not initialized.\n");
+ ret_code = I40E_ERR_QUEUE_EMPTY;
+ goto clean_arq_element_err;
+ }
+
+ /* set next_to_use to head */
+ ntu = rd32(hw, hw->aq.arq.head) & IAVF_VF_ARQH1_ARQH_MASK;
+ if (ntu == ntc) {
+ /* nothing to do - shouldn't need to update ring's values */
+ ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
+ goto clean_arq_element_out;
+ }
+
+ /* now clean the next descriptor */
+ desc = IAVF_ADMINQ_DESC(hw->aq.arq, ntc);
+ desc_idx = ntc;
+
+ hw->aq.arq_last_status =
+ (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
+ flags = le16_to_cpu(desc->flags);
+ if (flags & I40E_AQ_FLAG_ERR) {
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+ iavf_debug(hw,
+ IAVF_DEBUG_AQ_MESSAGE,
+ "AQRX: Event received with error 0x%X.\n",
+ hw->aq.arq_last_status);
+ }
+
+ e->desc = *desc;
+ datalen = le16_to_cpu(desc->datalen);
+ e->msg_len = min(datalen, e->buf_len);
+ if (e->msg_buf && (e->msg_len != 0))
+ memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
+ e->msg_len);
+
+ iavf_debug(hw, IAVF_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+ iavf_debug_aq(hw, IAVF_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
+ hw->aq.arq_buf_size);
+
+ /* Restore the original datalen and buffer address in the desc;
+ * FW overwrites datalen with the size of the event message
+ */
+ bi = &hw->aq.arq.r.arq_bi[ntc];
+ memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
+
+ desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
+ if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
+ desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
+ desc->datalen = cpu_to_le16((u16)bi->size);
+ desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
+ desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));
+
+ /* set tail = the last cleaned desc index. */
+ wr32(hw, hw->aq.arq.tail, ntc);
+ /* ntc is updated to tail + 1 */
+ ntc++;
+ if (ntc == hw->aq.num_arq_entries)
+ ntc = 0;
+ hw->aq.arq.next_to_clean = ntc;
+ hw->aq.arq.next_to_use = ntu;
+
+clean_arq_element_out:
+ /* Set pending if needed, unlock and return */
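+ /* pending is the ring distance from next_to_clean to next_to_use,
+ * plus a full ring's worth of entries when ntu has wrapped below
+ * ntc
+ */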
+ if (pending)
+ *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+
+clean_arq_element_err:
+ mutex_unlock(&hw->aq.arq_mutex);
+
+ return ret_code;
+}
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _IAVF_ADMINQ_H_
+#define _IAVF_ADMINQ_H_
+
+#include "iavf_osdep.h"
+#include "iavf_status.h"
+#include "iavf_adminq_cmd.h"
+
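+/* return a pointer to descriptor i in ring R's DMA descriptor buffer */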
+#define IAVF_ADMINQ_DESC(R, i) \
+ (&(((struct i40e_aq_desc *)((R).desc_buf.va))[i]))
+
+#define IAVF_ADMINQ_DESC_ALIGNMENT 4096
+
+struct iavf_adminq_ring {
+ struct iavf_virt_mem dma_head; /* space for dma structures */
+ struct iavf_dma_mem desc_buf; /* descriptor ring memory */
+ struct iavf_virt_mem cmd_buf; /* command buffer memory */
+
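+ /* per-descriptor buffer info; only the member matching the ring's
+ * direction (send vs. receive) is used
+ */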
+ union {
+ struct iavf_dma_mem *asq_bi;
+ struct iavf_dma_mem *arq_bi;
+ } r;
+
+ u16 count; /* Number of descriptors */
+ u16 rx_buf_len; /* Admin Receive Queue buffer length */
+
+ /* used for interrupt processing */
+ u16 next_to_use;
+ u16 next_to_clean;
+
+ /* register offsets for this queue, set at init time */
+ u32 head;
+ u32 tail;
+ u32 len;
+ u32 bah;
+ u32 bal;
+};
+
+/* ASQ transaction details */
+struct i40e_asq_cmd_details {
+ void *callback; /* cast from type I40E_ADMINQ_CALLBACK */
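+ /* opaque value copied into the descriptor's cookie fields and
+ * echoed back on completion; never interpreted by firmware
+ */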
+ u64 cookie;
+ u16 flags_ena;
+ u16 flags_dis;
+ bool async;
+ bool postpone;
+ struct i40e_aq_desc *wb_desc;
+};
+
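+/* return a pointer to the command details tracked for descriptor i */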
+#define I40E_ADMINQ_DETAILS(R, i) \
+ (&(((struct i40e_asq_cmd_details *)((R).cmd_buf.va))[i]))
+
+/* ARQ event information */
+struct i40e_arq_event_info {
+ struct i40e_aq_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
+/* Admin Queue information */
+struct iavf_adminq_info {
+ struct iavf_adminq_ring arq; /* receive queue */
+ struct iavf_adminq_ring asq; /* send queue */
+ u32 asq_cmd_timeout; /* send queue cmd write back timeout */
+ u16 num_arq_entries; /* receive queue depth */
+ u16 num_asq_entries; /* send queue depth */
+ u16 arq_buf_size; /* receive queue buffer size */
+ u16 asq_buf_size; /* send queue buffer size */
+ u16 fw_maj_ver; /* firmware major version */
+ u16 fw_min_ver; /* firmware minor version */
+ u32 fw_build; /* firmware build number */
+ u16 api_maj_ver; /* api major version */
+ u16 api_min_ver; /* api minor version */
+
+ struct mutex asq_mutex; /* Send queue lock */
+ struct mutex arq_mutex; /* Receive queue lock */
+
+ /* last status values on send and receive queues */
+ enum i40e_admin_queue_err asq_last_status;
+ enum i40e_admin_queue_err arq_last_status;
+};
+
+/**
+ * i40e_aq_rc_to_posix - convert errors to user-land codes
+ * @aq_ret: AdminQ handler error code, which can override aq_rc
+ * @aq_rc: AdminQ firmware error code to convert
+ **/
+static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc)
+{
+ int aq_to_posix[] = {
+ 0, /* I40E_AQ_RC_OK */
+ -EPERM, /* I40E_AQ_RC_EPERM */
+ -ENOENT, /* I40E_AQ_RC_ENOENT */
+ -ESRCH, /* I40E_AQ_RC_ESRCH */
+ -EINTR, /* I40E_AQ_RC_EINTR */
+ -EIO, /* I40E_AQ_RC_EIO */
+ -ENXIO, /* I40E_AQ_RC_ENXIO */
+ -E2BIG, /* I40E_AQ_RC_E2BIG */
+ -EAGAIN, /* I40E_AQ_RC_EAGAIN */
+ -ENOMEM, /* I40E_AQ_RC_ENOMEM */
+ -EACCES, /* I40E_AQ_RC_EACCES */
+ -EFAULT, /* I40E_AQ_RC_EFAULT */
+ -EBUSY, /* I40E_AQ_RC_EBUSY */
+ -EEXIST, /* I40E_AQ_RC_EEXIST */
+ -EINVAL, /* I40E_AQ_RC_EINVAL */
+ -ENOTTY, /* I40E_AQ_RC_ENOTTY */
+ -ENOSPC, /* I40E_AQ_RC_ENOSPC */
+ -ENOSYS, /* I40E_AQ_RC_ENOSYS */
+ -ERANGE, /* I40E_AQ_RC_ERANGE */
+ -EPIPE, /* I40E_AQ_RC_EFLUSHED */
+ -ESPIPE, /* I40E_AQ_RC_BAD_ADDR */
+ -EROFS, /* I40E_AQ_RC_EMODE */
+ -EFBIG, /* I40E_AQ_RC_EFBIG */
+ };
+
+ /* aq_rc is invalid if AQ timed out */
+ if (aq_ret == I40E_ERR_ADMIN_QUEUE_TIMEOUT)
+ return -EAGAIN;
+
+ if ((u32)aq_rc >= ARRAY_SIZE(aq_to_posix))
+ return -ERANGE;
+
+ return aq_to_posix[aq_rc];
+}
+
+/* general information */
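+/* buffers larger than this need the LB (large buffer) descriptor flag */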
+#define I40E_AQ_LARGE_BUF 512
+#define I40E_ASQ_CMD_TIMEOUT 250000 /* usecs */
+
+void iavf_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc, u16 opcode);
+
+#endif /* _IAVF_ADMINQ_H_ */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2013 - 2018 Intel Corporation. */
+
+#ifndef _I40E_ADMINQ_CMD_H_
+#define _I40E_ADMINQ_CMD_H_
+
+/* This header file defines the i40e Admin Queue commands and is shared between
+ * i40e Firmware and Software. Do not change the names in this file to IAVF
+ * because this file should be diff-able against the i40e version, even
+ * though many parts have been removed in this VF version.
+ *
+ * This file needs to comply with the Linux Kernel coding style.
+ */
+
+#define I40E_FW_API_VERSION_MAJOR 0x0001
+#define I40E_FW_API_VERSION_MINOR_X722 0x0005
+#define I40E_FW_API_VERSION_MINOR_X710 0x0008
+
+#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
+ I40E_FW_API_VERSION_MINOR_X710 : \
+ I40E_FW_API_VERSION_MINOR_X722)
+
+/* API version 1.7 implements additional link and PHY-specific APIs */
+#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+
+struct i40e_aq_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 param2;
+ __le32 param3;
+ } internal;
+ struct {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+ } external;
+ u8 raw[16];
+ } params;
+};
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define I40E_AQ_FLAG_DD_SHIFT 0
+#define I40E_AQ_FLAG_CMP_SHIFT 1
+#define I40E_AQ_FLAG_ERR_SHIFT 2
+#define I40E_AQ_FLAG_VFE_SHIFT 3
+#define I40E_AQ_FLAG_LB_SHIFT 9
+#define I40E_AQ_FLAG_RD_SHIFT 10
+#define I40E_AQ_FLAG_VFC_SHIFT 11
+#define I40E_AQ_FLAG_BUF_SHIFT 12
+#define I40E_AQ_FLAG_SI_SHIFT 13
+#define I40E_AQ_FLAG_EI_SHIFT 14
+#define I40E_AQ_FLAG_FE_SHIFT 15
+
+#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+
+/* error codes */
+enum i40e_admin_queue_err {
+ I40E_AQ_RC_OK = 0, /* success */
+ I40E_AQ_RC_EPERM = 1, /* Operation not permitted */
+ I40E_AQ_RC_ENOENT = 2, /* No such element */
+ I40E_AQ_RC_ESRCH = 3, /* Bad opcode */
+ I40E_AQ_RC_EINTR = 4, /* operation interrupted */
+ I40E_AQ_RC_EIO = 5, /* I/O error */
+ I40E_AQ_RC_ENXIO = 6, /* No such resource */
+ I40E_AQ_RC_E2BIG = 7, /* Arg too long */
+ I40E_AQ_RC_EAGAIN = 8, /* Try again */
+ I40E_AQ_RC_ENOMEM = 9, /* Out of memory */
+ I40E_AQ_RC_EACCES = 10, /* Permission denied */
+ I40E_AQ_RC_EFAULT = 11, /* Bad address */
+ I40E_AQ_RC_EBUSY = 12, /* Device or resource busy */
+ I40E_AQ_RC_EEXIST = 13, /* object already exists */
+ I40E_AQ_RC_EINVAL = 14, /* Invalid argument */
+ I40E_AQ_RC_ENOTTY = 15, /* Not a typewriter */
+ I40E_AQ_RC_ENOSPC = 16, /* No space left or alloc failure */
+ I40E_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ I40E_AQ_RC_ERANGE = 18, /* Parameter out of range */
+ I40E_AQ_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ I40E_AQ_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ I40E_AQ_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ I40E_AQ_RC_EFBIG = 22, /* File too large */
+};
+
+/* Admin Queue command opcodes */
+enum i40e_admin_queue_opc {
+ /* aq commands */
+ i40e_aqc_opc_get_version = 0x0001,
+ i40e_aqc_opc_driver_version = 0x0002,
+ i40e_aqc_opc_queue_shutdown = 0x0003,
+ i40e_aqc_opc_set_pf_context = 0x0004,
+
+ /* resource ownership */
+ i40e_aqc_opc_request_resource = 0x0008,
+ i40e_aqc_opc_release_resource = 0x0009,
+
+ i40e_aqc_opc_list_func_capabilities = 0x000A,
+ i40e_aqc_opc_list_dev_capabilities = 0x000B,
+
+ /* Proxy commands */
+ i40e_aqc_opc_set_proxy_config = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
+ /* LAA */
+ i40e_aqc_opc_mac_address_read = 0x0107,
+ i40e_aqc_opc_mac_address_write = 0x0108,
+
+ /* PXE */
+ i40e_aqc_opc_clear_pxe_mode = 0x0110,
+
+ /* WoL commands */
+ i40e_aqc_opc_set_wol_filter = 0x0120,
+ i40e_aqc_opc_get_wake_reason = 0x0121,
+
+ /* internal switch commands */
+ i40e_aqc_opc_get_switch_config = 0x0200,
+ i40e_aqc_opc_add_statistics = 0x0201,
+ i40e_aqc_opc_remove_statistics = 0x0202,
+ i40e_aqc_opc_set_port_parameters = 0x0203,
+ i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
+ i40e_aqc_opc_set_switch_config = 0x0205,
+ i40e_aqc_opc_rx_ctl_reg_read = 0x0206,
+ i40e_aqc_opc_rx_ctl_reg_write = 0x0207,
+
+ i40e_aqc_opc_add_vsi = 0x0210,
+ i40e_aqc_opc_update_vsi_parameters = 0x0211,
+ i40e_aqc_opc_get_vsi_parameters = 0x0212,
+
+ i40e_aqc_opc_add_pv = 0x0220,
+ i40e_aqc_opc_update_pv_parameters = 0x0221,
+ i40e_aqc_opc_get_pv_parameters = 0x0222,
+
+ i40e_aqc_opc_add_veb = 0x0230,
+ i40e_aqc_opc_update_veb_parameters = 0x0231,
+ i40e_aqc_opc_get_veb_parameters = 0x0232,
+
+ i40e_aqc_opc_delete_element = 0x0243,
+
+ i40e_aqc_opc_add_macvlan = 0x0250,
+ i40e_aqc_opc_remove_macvlan = 0x0251,
+ i40e_aqc_opc_add_vlan = 0x0252,
+ i40e_aqc_opc_remove_vlan = 0x0253,
+ i40e_aqc_opc_set_vsi_promiscuous_modes = 0x0254,
+ i40e_aqc_opc_add_tag = 0x0255,
+ i40e_aqc_opc_remove_tag = 0x0256,
+ i40e_aqc_opc_add_multicast_etag = 0x0257,
+ i40e_aqc_opc_remove_multicast_etag = 0x0258,
+ i40e_aqc_opc_update_tag = 0x0259,
+ i40e_aqc_opc_add_control_packet_filter = 0x025A,
+ i40e_aqc_opc_remove_control_packet_filter = 0x025B,
+ i40e_aqc_opc_add_cloud_filters = 0x025C,
+ i40e_aqc_opc_remove_cloud_filters = 0x025D,
+ i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
+
+ i40e_aqc_opc_add_mirror_rule = 0x0260,
+ i40e_aqc_opc_delete_mirror_rule = 0x0261,
+
+ /* Dynamic Device Personalization */
+ i40e_aqc_opc_write_personalization_profile = 0x0270,
+ i40e_aqc_opc_get_personalization_profile_list = 0x0271,
+
+ /* DCB commands */
+ i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
+ i40e_aqc_opc_dcb_updated = 0x0302,
+ i40e_aqc_opc_set_dcb_parameters = 0x0303,
+
+ /* TX scheduler */
+ i40e_aqc_opc_configure_vsi_bw_limit = 0x0400,
+ i40e_aqc_opc_configure_vsi_ets_sla_bw_limit = 0x0406,
+ i40e_aqc_opc_configure_vsi_tc_bw = 0x0407,
+ i40e_aqc_opc_query_vsi_bw_config = 0x0408,
+ i40e_aqc_opc_query_vsi_ets_sla_config = 0x040A,
+ i40e_aqc_opc_configure_switching_comp_bw_limit = 0x0410,
+
+ i40e_aqc_opc_enable_switching_comp_ets = 0x0413,
+ i40e_aqc_opc_modify_switching_comp_ets = 0x0414,
+ i40e_aqc_opc_disable_switching_comp_ets = 0x0415,
+ i40e_aqc_opc_configure_switching_comp_ets_bw_limit = 0x0416,
+ i40e_aqc_opc_configure_switching_comp_bw_config = 0x0417,
+ i40e_aqc_opc_query_switching_comp_ets_config = 0x0418,
+ i40e_aqc_opc_query_port_ets_config = 0x0419,
+ i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
+ i40e_aqc_opc_suspend_port_tx = 0x041B,
+ i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
+
+ /* hmc */
+ i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
+ i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
+
+ /* phy commands */
+ i40e_aqc_opc_get_phy_abilities = 0x0600,
+ i40e_aqc_opc_set_phy_config = 0x0601,
+ i40e_aqc_opc_set_mac_config = 0x0603,
+ i40e_aqc_opc_set_link_restart_an = 0x0605,
+ i40e_aqc_opc_get_link_status = 0x0607,
+ i40e_aqc_opc_set_phy_int_mask = 0x0613,
+ i40e_aqc_opc_get_local_advt_reg = 0x0614,
+ i40e_aqc_opc_set_local_advt_reg = 0x0615,
+ i40e_aqc_opc_get_partner_advt = 0x0616,
+ i40e_aqc_opc_set_lb_modes = 0x0618,
+ i40e_aqc_opc_get_phy_wol_caps = 0x0621,
+ i40e_aqc_opc_set_phy_debug = 0x0622,
+ i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
+ i40e_aqc_opc_run_phy_activity = 0x0626,
+ i40e_aqc_opc_set_phy_register = 0x0628,
+ i40e_aqc_opc_get_phy_register = 0x0629,
+
+ /* NVM commands */
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_config_read = 0x0704,
+ i40e_aqc_opc_nvm_config_write = 0x0705,
+ i40e_aqc_opc_oem_post_update = 0x0720,
+ i40e_aqc_opc_thermal_sensor = 0x0721,
+
+ /* virtualization commands */
+ i40e_aqc_opc_send_msg_to_pf = 0x0801,
+ i40e_aqc_opc_send_msg_to_vf = 0x0802,
+ i40e_aqc_opc_send_msg_to_peer = 0x0803,
+
+ /* alternate structure */
+ i40e_aqc_opc_alternate_write = 0x0900,
+ i40e_aqc_opc_alternate_write_indirect = 0x0901,
+ i40e_aqc_opc_alternate_read = 0x0902,
+ i40e_aqc_opc_alternate_read_indirect = 0x0903,
+ i40e_aqc_opc_alternate_write_done = 0x0904,
+ i40e_aqc_opc_alternate_set_mode = 0x0905,
+ i40e_aqc_opc_alternate_clear_port = 0x0906,
+
+ /* LLDP commands */
+ i40e_aqc_opc_lldp_get_mib = 0x0A00,
+ i40e_aqc_opc_lldp_update_mib = 0x0A01,
+ i40e_aqc_opc_lldp_add_tlv = 0x0A02,
+ i40e_aqc_opc_lldp_update_tlv = 0x0A03,
+ i40e_aqc_opc_lldp_delete_tlv = 0x0A04,
+ i40e_aqc_opc_lldp_stop = 0x0A05,
+ i40e_aqc_opc_lldp_start = 0x0A06,
+
+ /* Tunnel commands */
+ i40e_aqc_opc_add_udp_tunnel = 0x0B00,
+ i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+ i40e_aqc_opc_set_rss_key = 0x0B02,
+ i40e_aqc_opc_set_rss_lut = 0x0B03,
+ i40e_aqc_opc_get_rss_key = 0x0B04,
+ i40e_aqc_opc_get_rss_lut = 0x0B05,
+
+ /* Async Events */
+ i40e_aqc_opc_event_lan_overflow = 0x1001,
+
+ /* OEM commands */
+ i40e_aqc_opc_oem_parameter_change = 0xFE00,
+ i40e_aqc_opc_oem_device_status_change = 0xFE01,
+ i40e_aqc_opc_oem_ocsd_initialize = 0xFE02,
+ i40e_aqc_opc_oem_ocbb_initialize = 0xFE03,
+
+ /* debug commands */
+ i40e_aqc_opc_debug_read_reg = 0xFF03,
+ i40e_aqc_opc_debug_write_reg = 0xFF04,
+ i40e_aqc_opc_debug_modify_reg = 0xFF07,
+ i40e_aqc_opc_debug_dump_internals = 0xFF08,
+};
+
+/* command structures and indirect data structures */
+
+/* Structure naming conventions:
+ * - no suffix for direct command descriptor structures
+ * - _data for indirect sent data
+ * - _resp for indirect return data (data which is both will use _data)
+ * - _completion for direct return data
+ * - _element_ for repeated elements (may also be _data or _resp)
+ *
+ * Command structures are expected to overlay the params.raw member of the basic
+ * descriptor, and as such cannot exceed 16 bytes in length.
+ */
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define I40E_CHECK_STRUCT_LEN(n, X) enum i40e_static_assert_enum_##X \
+ { i40e_static_assert_##X = (n)/((sizeof(struct X) == (n)) ? 1 : 0) }
+
+/* This macro is used extensively to ensure that command structures are 16
+ * bytes in length as they have to map to the raw array of that size.
+ */
+#define I40E_CHECK_CMD_LENGTH(X) I40E_CHECK_STRUCT_LEN(16, X)
+
+/* Queue Shutdown (direct 0x0003) */
+struct i40e_aqc_queue_shutdown {
+ __le32 driver_unloading;
+#define I40E_AQ_DRIVER_UNLOADING 0x1
+ u8 reserved[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+
+struct i40e_aqc_vsi_properties_data {
+ /* first 96 bytes are written by SW */
+ __le16 valid_sections;
+#define I40E_AQ_VSI_PROP_SWITCH_VALID 0x0001
+#define I40E_AQ_VSI_PROP_SECURITY_VALID 0x0002
+#define I40E_AQ_VSI_PROP_VLAN_VALID 0x0004
+#define I40E_AQ_VSI_PROP_CAS_PV_VALID 0x0008
+#define I40E_AQ_VSI_PROP_INGRESS_UP_VALID 0x0010
+#define I40E_AQ_VSI_PROP_EGRESS_UP_VALID 0x0020
+#define I40E_AQ_VSI_PROP_QUEUE_MAP_VALID 0x0040
+#define I40E_AQ_VSI_PROP_QUEUE_OPT_VALID 0x0080
+#define I40E_AQ_VSI_PROP_OUTER_UP_VALID 0x0100
+#define I40E_AQ_VSI_PROP_SCHED_VALID 0x0200
+ /* switch section */
+ __le16 switch_id; /* 12bit id combined with flags below */
+#define I40E_AQ_VSI_SW_ID_SHIFT 0x0000
+#define I40E_AQ_VSI_SW_ID_MASK (0xFFF << I40E_AQ_VSI_SW_ID_SHIFT)
+#define I40E_AQ_VSI_SW_ID_FLAG_NOT_STAG 0x1000
+#define I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB 0x2000
+#define I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB 0x4000
+ u8 sw_reserved[2];
+ /* security section */
+ u8 sec_flags;
+#define I40E_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD 0x01
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK 0x02
+#define I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK 0x04
+ u8 sec_reserved;
+ /* VLAN section */
+ __le16 pvid; /* VLANS include priority bits */
+ __le16 fcoe_pvid;
+ u8 port_vlan_flags;
+#define I40E_AQ_VSI_PVLAN_MODE_SHIFT 0x00
+#define I40E_AQ_VSI_PVLAN_MODE_MASK (0x03 << \
+ I40E_AQ_VSI_PVLAN_MODE_SHIFT)
+#define I40E_AQ_VSI_PVLAN_MODE_TAGGED 0x01
+#define I40E_AQ_VSI_PVLAN_MODE_UNTAGGED 0x02
+#define I40E_AQ_VSI_PVLAN_MODE_ALL 0x03
+#define I40E_AQ_VSI_PVLAN_INSERT_PVID 0x04
+#define I40E_AQ_VSI_PVLAN_EMOD_SHIFT 0x03
+#define I40E_AQ_VSI_PVLAN_EMOD_MASK (0x3 << \
+ I40E_AQ_VSI_PVLAN_EMOD_SHIFT)
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH 0x0
+#define I40E_AQ_VSI_PVLAN_EMOD_STR_UP 0x08
+#define I40E_AQ_VSI_PVLAN_EMOD_STR 0x10
+#define I40E_AQ_VSI_PVLAN_EMOD_NOTHING 0x18
+ u8 pvlan_reserved[3];
+ /* ingress egress up sections */
+ __le32 ingress_table; /* bitmap, 3 bits per up */
+#define I40E_AQ_VSI_UP_TABLE_UP0_SHIFT 0
+#define I40E_AQ_VSI_UP_TABLE_UP0_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP0_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP1_SHIFT 3
+#define I40E_AQ_VSI_UP_TABLE_UP1_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP1_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP2_SHIFT 6
+#define I40E_AQ_VSI_UP_TABLE_UP2_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP2_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP3_SHIFT 9
+#define I40E_AQ_VSI_UP_TABLE_UP3_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP3_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP4_SHIFT 12
+#define I40E_AQ_VSI_UP_TABLE_UP4_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP4_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP5_SHIFT 15
+#define I40E_AQ_VSI_UP_TABLE_UP5_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP5_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP6_SHIFT 18
+#define I40E_AQ_VSI_UP_TABLE_UP6_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP6_SHIFT)
+#define I40E_AQ_VSI_UP_TABLE_UP7_SHIFT 21
+#define I40E_AQ_VSI_UP_TABLE_UP7_MASK (0x7 << \
+ I40E_AQ_VSI_UP_TABLE_UP7_SHIFT)
+ __le32 egress_table; /* same defines as for ingress table */
+ /* cascaded PV section */
+ __le16 cas_pv_tag;
+ u8 cas_pv_flags;
+#define I40E_AQ_VSI_CAS_PV_TAGX_SHIFT 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_MASK (0x03 << \
+ I40E_AQ_VSI_CAS_PV_TAGX_SHIFT)
+#define I40E_AQ_VSI_CAS_PV_TAGX_LEAVE 0x00
+#define I40E_AQ_VSI_CAS_PV_TAGX_REMOVE 0x01
+#define I40E_AQ_VSI_CAS_PV_TAGX_COPY 0x02
+#define I40E_AQ_VSI_CAS_PV_INSERT_TAG 0x10
+#define I40E_AQ_VSI_CAS_PV_ETAG_PRUNE 0x20
+#define I40E_AQ_VSI_CAS_PV_ACCEPT_HOST_TAG 0x40
+ u8 cas_pv_reserved;
+ /* queue mapping section */
+ __le16 mapping_flags;
+#define I40E_AQ_VSI_QUE_MAP_CONTIG 0x0
+#define I40E_AQ_VSI_QUE_MAP_NONCONTIG 0x1
+ __le16 queue_mapping[16];
+#define I40E_AQ_VSI_QUEUE_SHIFT 0x0
+#define I40E_AQ_VSI_QUEUE_MASK (0x7FF << I40E_AQ_VSI_QUEUE_SHIFT)
+ __le16 tc_mapping[8];
+#define I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT 0
+#define I40E_AQ_VSI_TC_QUE_OFFSET_MASK (0x1FF << \
+ I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+#define I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT 9
+#define I40E_AQ_VSI_TC_QUE_NUMBER_MASK (0x7 << \
+ I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ /* queueing option section */
+ u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
+#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
+#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
+ u8 queueing_opt_reserved[3];
+ /* scheduler section */
+ u8 up_enable_bits;
+ u8 sched_reserved;
+ /* outer up section */
+ __le32 outer_up_table; /* same structure and defines as ingress tbl */
+ u8 cmd_reserved[8];
+ /* last 32 bytes are written by FW */
+ __le16 qs_handle[8];
+#define I40E_AQ_VSI_QS_HANDLE_INVALID 0xFFFF
+ __le16 stat_counter_idx;
+ __le16 sched_id;
+ u8 resp_reserved[12];
+};
+
+I40E_CHECK_STRUCT_LEN(128, i40e_aqc_vsi_properties_data);
+
+/* Get VEB Parameters (direct 0x0232)
+ * uses i40e_aqc_switch_seid for the descriptor
+ */
+struct i40e_aqc_get_veb_parameters_completion {
+ __le16 seid;
+ __le16 switch_id;
+ __le16 veb_flags; /* only the first/last flags from 0x0230 are valid */
+ __le16 statistic_index;
+ __le16 vebs_used;
+ __le16 vebs_free;
+ u8 reserved[4];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_veb_parameters_completion);
+
+#define I40E_LINK_SPEED_100MB_SHIFT 0x1
+#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
+#define I40E_LINK_SPEED_10GB_SHIFT 0x3
+#define I40E_LINK_SPEED_40GB_SHIFT 0x4
+#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+#define I40E_LINK_SPEED_25GB_SHIFT 0x6
+
+enum i40e_aq_link_speed {
+ I40E_LINK_SPEED_UNKNOWN = 0,
+ I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT),
+ I40E_LINK_SPEED_25GB = BIT(I40E_LINK_SPEED_25GB_SHIFT),
+};
+
+/* Send to PF command (indirect 0x0801) id is only used by PF
+ * Send to VF command (indirect 0x0802) id is only used by PF
+ * Send to Peer PF command (indirect 0x0803)
+ */
+struct i40e_aqc_pf_vf_message {
+ __le32 id;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_pf_vf_message);
+
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \
+ BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+#endif /* _I40E_ADMINQ_CMD_H_ */
/* Copyright(c) 2013 - 2018 Intel Corporation. */
#include "iavf_type.h"
-#include "i40e_adminq.h"
+#include "iavf_adminq.h"
#include "iavf_prototype.h"
#include <linux/avf/virtchnl.h>
#include "iavf_status.h"
#include "iavf_osdep.h"
#include "iavf_register.h"
-#include "i40e_adminq.h"
+#include "iavf_adminq.h"
#include "iavf_devids.h"
#define IAVF_RXQ_CTX_DBUFF_SHIFT 7