* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright(c) 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#define IWL_DEFAULT_QUEUE_SIZE 256
#define IWL_MGMT_QUEUE_SIZE 16
+#define IWL_CMD_QUEUE_SIZE 32
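(Aside, not part of the patch: IWL_CMD_QUEUE_SIZE and IWL_DEFAULT_QUEUE_SIZE act as floors rather than fixed lengths; at init time each is combined with the per-family minimum from the config, exactly as the max_t() calls changed further down do. A minimal sketch of that selection, where iwl_pcie_pick_queue_size() is a hypothetical helper name used only for illustration:)

/* Sketch only: mirrors the max_t() sizing logic updated in this patch. */
static inline u32 iwl_pcie_pick_queue_size(struct iwl_trans *trans,
					    bool cmd_queue)
{
	if (cmd_queue)
		return max_t(u32, IWL_CMD_QUEUE_SIZE,
			     trans->cfg->min_txq_size);

	return max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
		     trans->cfg->min_256_ba_txq_size);
}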
/**
* struct iwl_tx_queue_cfg_cmd - txq hw scheduler config command
* @sta_id: station id
void *iml_img;
u32 control_flags = 0;
int ret;
- int cmdq_size = max_t(u32, TFD_CMD_SLOTS, trans->cfg->min_txq_size);
+ int cmdq_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
+ trans->cfg->min_txq_size);
/* Allocate prph scratch */
prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
ctxt_info->hcmd_cfg.cmd_queue_addr =
cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
ctxt_info->hcmd_cfg.cmd_queue_size =
- TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
+ TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);
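(Aside, not part of the patch, and assuming the iwl-fh.h definition at the time, TFD_QUEUE_CB_SIZE(x) == ilog2(x) - 3: the size written to the context info is log2-encoded, which is why the slot counts are powers of two.)

/*
 * Illustration only, under the assumption above:
 * ilog2(32) - 3 == 2, ilog2(256) - 3 == 5.
 */
static inline int iwl_cmd_queue_cb_size_example(void)
{
	return TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE);
}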
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
u32 tbs;
};
-
-#define TFD_TX_CMD_SLOTS 256
-#define TFD_CMD_SLOTS 32
-
/*
* The FH will write back to the first TB only, so we need to copy some data
* into the buffer regardless of whether it should be mapped or not.
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- int queue_size = max_t(u32, TFD_CMD_SLOTS, trans->cfg->min_txq_size);
+ int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
+ trans->cfg->min_txq_size);
/* TODO: most of the logic can be removed in A0 - but not in Z0 */
spin_lock(&trans_pcie->irq_lock);
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
if (cmd_queue)
- slots_num = max_t(u32, TFD_CMD_SLOTS,
+ slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
trans->cfg->min_txq_size);
else
- slots_num = max_t(u32, TFD_TX_CMD_SLOTS,
+ slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_256_ba_txq_size);
trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
if (cmd_queue)
- slots_num = max_t(u32, TFD_CMD_SLOTS,
+ slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
trans->cfg->min_txq_size);
else
- slots_num = max_t(u32, TFD_TX_CMD_SLOTS,
+ slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
trans->cfg->min_256_ba_txq_size);
ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
slots_num, cmd_queue);