@@ -330,7 +330,7 @@ int iwl_pnvm_load(struct iwl_trans *trans,
*/
trans->reduce_power_loaded = true;
} else {
- ret = iwl_trans_load_reduce_power(trans, &pnvm_data);
+ ret = iwl_trans_load_reduce_power(trans, &pnvm_data, capa);
if (ret) {
IWL_DEBUG_FW(trans,
"Failed to load reduce power table %d\n",
@@ -340,7 +340,7 @@ int iwl_pnvm_load(struct iwl_trans *trans,
kfree(data);
}
}
- iwl_trans_set_reduce_power(trans);
+ iwl_trans_set_reduce_power(trans, capa);
iwl_init_notification_wait(notif_wait, &pnvm_wait,
ntf_cmds, ARRAY_SIZE(ntf_cmds),
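
For reference, a minimal standalone sketch (not driver code) of the pattern this hunk threads `capa` through to: fw_has_capa() in the real driver tests a bit in struct iwl_ucode_capabilities, and the IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG bit picks between the segmented and the continuous DRAM layout. The names below are illustrative stand-ins.

```c
#include <stdbool.h>
#include <stdio.h>

/* A plain bool models the IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG bit. */
struct capa { bool fragmented_pnvm; };

static int load_segments(void)   { puts("one DRAM region per chunk");     return 0; }
static int load_continuous(void) { puts("single contiguous DRAM region"); return 0; }

static int load_image(const struct capa *capa)
{
	/* Same shape as the patched load paths: the capability bit picks
	 * the DRAM layout; the caller sets *_loaded only on success. */
	return capa->fragmented_pnvm ? load_segments() : load_continuous();
}

int main(void)
{
	struct capa c = { .fragmented_pnvm = true };

	return load_image(&c);
}
```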
@@ -98,9 +98,9 @@ struct iwl_prph_scratch_control {
} __packed; /* PERIPH_SCRATCH_CONTROL_S */
/*
- * struct iwl_prph_scratch_pnvm_cfg - ror config
+ * struct iwl_prph_scratch_pnvm_cfg - PNVM scratch
* @pnvm_base_addr: PNVM start address
- * @pnvm_size: PNVM size in DWs
+ * @pnvm_size: the size of the PNVM image in bytes
* @reserved: reserved
*/
struct iwl_prph_scratch_pnvm_cfg {
@@ -142,7 +142,7 @@ struct iwl_prph_scratch_rbd_cfg {
/*
* struct iwl_prph_scratch_uefi_cfg - prph scratch reduce power table
* @base_addr: reduce power table address
- * @size: table size in dwords
+ * @size: the size of the entire power table image in bytes
*/
struct iwl_prph_scratch_uefi_cfg {
__le64 base_addr;
@@ -292,10 +292,13 @@ int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
const struct iwl_ucode_capabilities *capa);
void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
const struct iwl_ucode_capabilities *capa);
-int iwl_trans_pcie_ctx_info_gen3_load_reduce_power
- (struct iwl_trans *trans,
- const struct iwl_pnvm_image *payloads);
-void iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans);
+int
+iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
+ const struct iwl_pnvm_image *payloads,
+ const struct iwl_ucode_capabilities *capa);
+void
+iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa);
int iwl_trans_pcie_ctx_info_gen3_set_step(struct iwl_trans *trans,
u32 mbx_addr_0_step, u32 mbx_addr_1_step);
#endif /* __iwl_context_info_file_gen3_h__ */
@@ -641,8 +641,10 @@ struct iwl_trans_ops {
void (*set_pnvm)(struct iwl_trans *trans,
const struct iwl_ucode_capabilities *capa);
int (*load_reduce_power)(struct iwl_trans *trans,
- const struct iwl_pnvm_image *payloads);
- void (*set_reduce_power)(struct iwl_trans *trans);
+ const struct iwl_pnvm_image *payloads,
+ const struct iwl_ucode_capabilities *capa);
+ void (*set_reduce_power)(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa);
void (*interrupts)(struct iwl_trans *trans, bool enable);
int (*imr_dma_data)(struct iwl_trans *trans,
@@ -731,6 +733,19 @@ struct iwl_dram_data {
int size;
};
+/**
+ * struct iwl_dram_regions - DRAM regions container structure
+ * @drams: array of several DRAM areas that contain the pnvm and power
+ * reduction table payloads.
+ * @n_regions: number of DRAM regions that were allocated
+ * @prph_scratch_mem_desc: points to a structure allocated in DRAM,
+ * designed to show FW where all the payloads are.
+ */
+struct iwl_dram_regions {
+ struct iwl_dram_data drams[IPC_DRAM_MAP_ENTRY_NUM_MAX];
+ struct iwl_dram_data prph_scratch_mem_desc;
+ u8 n_regions;
+};
+
/**
* struct iwl_fw_mon - fw monitor per allocation id
* @num_frags: number of fragments
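
A standalone model (not driver code) of the iwl_dram_regions layout introduced above: each payload chunk gets its own DRAM region, and a separate descriptor region holds the physical address of every payload so the firmware can find them. Sizes and addresses here are made up.

```c
#include <stdint.h>
#include <stdio.h>

#define MAX_REGIONS 64	/* stands in for IPC_DRAM_MAP_ENTRY_NUM_MAX */

struct region { uint64_t physical; uint32_t size; };

struct dram_regions {
	struct region drams[MAX_REGIONS];
	struct region prph_scratch_mem_desc;	/* holds the address table */
	uint8_t n_regions;
};

int main(void)
{
	struct dram_regions r = { .n_regions = 2 };
	uint64_t addr_table[MAX_REGIONS] = { 0 };

	r.drams[0] = (struct region){ .physical = 0x1000, .size = 512 };
	r.drams[1] = (struct region){ .physical = 0x4000, .size = 256 };

	/* mirrors the loop that fills mem_descs[] in the patch */
	for (int i = 0; i < r.n_regions; i++)
		addr_table[i] = r.drams[i].physical;

	printf("%d regions, first at %#llx\n", r.n_regions,
	       (unsigned long long)addr_table[0]);
	return 0;
}
```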
@@ -1560,15 +1575,18 @@ static inline void iwl_trans_set_pnvm(struct iwl_trans *trans,
static inline int iwl_trans_load_reduce_power
(struct iwl_trans *trans,
- const struct iwl_pnvm_image *payloads)
+ const struct iwl_pnvm_image *payloads,
+ const struct iwl_ucode_capabilities *capa)
{
- return trans->ops->load_reduce_power(trans, payloads);
+ return trans->ops->load_reduce_power(trans, payloads, capa);
}
-static inline void iwl_trans_set_reduce_power(struct iwl_trans *trans)
+static inline void
+iwl_trans_set_reduce_power(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa)
{
if (trans->ops->set_reduce_power)
- trans->ops->set_reduce_power(trans);
+ trans->ops->set_reduce_power(trans, capa);
}
static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2018-2022 Intel Corporation
+ * Copyright (C) 2018-2023 Intel Corporation
*/
#include "iwl-trans.h"
#include "iwl-fh.h"
@@ -317,11 +317,11 @@ static int iwl_pcie_load_payloads_continuously(struct iwl_trans *trans,
static int iwl_pcie_load_payloads_segments
(struct iwl_trans *trans,
+ struct iwl_dram_regions *dram_regions,
const struct iwl_pnvm_image *pnvm_data)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_dram_data *cur_pnvm_dram = &trans_pcie->pnvm_dram[0],
- *desc_dram = &trans_pcie->pnvm_regions_desc_array;
+ struct iwl_dram_data *cur_payload_dram = &dram_regions->drams[0];
+ struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;
struct iwl_prph_scrath_mem_desc_addr_array *addresses;
const void *data;
u32 len;
@@ -341,30 +341,31 @@ static int iwl_pcie_load_payloads_segments
memset(desc_dram->block, 0, len);
/* allocate DRAM region for each payload */
- trans_pcie->n_pnvm_regions = 0;
+ dram_regions->n_regions = 0;
for (i = 0; i < pnvm_data->n_chunks; i++) {
len = pnvm_data->chunks[i].len;
data = pnvm_data->chunks[i].data;
- if (iwl_pcie_ctxt_info_alloc_dma(trans, data, len,
- cur_pnvm_dram)) {
- iwl_trans_pcie_free_pnvm_dram(trans_pcie, trans->dev);
+ if (iwl_pcie_ctxt_info_alloc_dma(trans,
+ data,
+ len,
+ cur_payload_dram)) {
+ iwl_trans_pcie_free_pnvm_dram_regions(dram_regions,
+ trans->dev);
return -ENOMEM;
}
- trans_pcie->n_pnvm_regions++;
- cur_pnvm_dram++;
+ dram_regions->n_regions++;
+ cur_payload_dram++;
}
/* fill desc with the DRAM payloads addresses */
addresses = desc_dram->block;
-
for (i = 0; i < pnvm_data->n_chunks; i++) {
addresses->mem_descs[i] =
- cpu_to_le64(trans_pcie->pnvm_dram[i].physical);
+ cpu_to_le64(dram_regions->drams[i].physical);
}
- trans->pnvm_loaded = true;
return 0;
}
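
The loop above allocates one DMA region per chunk and rolls back everything already allocated if any allocation fails. A standalone model of that rollback pattern, with plain malloc()/free() standing in for dma_alloc_coherent()/dma_free_coherent() (the patch does the rollback via iwl_trans_pcie_free_pnvm_dram_regions()):

```c
#include <stddef.h>
#include <stdlib.h>

static void *regions[16];
static int n_regions;

static void free_regions(void)
{
	for (int i = 0; i < n_regions; i++)
		free(regions[i]);
	n_regions = 0;
}

static int load_chunks(const size_t *lens, int n_chunks)
{
	for (int i = 0; i < n_chunks; i++) {
		regions[n_regions] = malloc(lens[i]);
		if (!regions[n_regions]) {
			free_regions();	/* roll back partial progress */
			return -1;	/* -ENOMEM in the driver */
		}
		n_regions++;
	}
	return 0;
}

int main(void)
{
	size_t lens[] = { 512, 256, 128 };

	return load_chunks(lens, 3);
}
```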
@@ -376,7 +377,7 @@ int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
&trans_pcie->prph_scratch->ctrl_cfg;
- struct iwl_dram_data *dram = &trans_pcie->pnvm_dram[0];
+ struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data;
int ret = 0;
/* only allocate the DRAM if not allocated yet */
@@ -394,28 +395,51 @@ int iwl_trans_pcie_ctx_info_gen3_load_pnvm(struct iwl_trans *trans,
return -EINVAL;
}
- /* allocate several DRAM sections */
- if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
- return iwl_pcie_load_payloads_segments(trans, pnvm_payloads);
-
- /* allocate one DRAM section */
- ret = iwl_pcie_load_payloads_continuously(trans, pnvm_payloads, dram);
- if (!ret) {
- trans_pcie->n_pnvm_regions = 1;
- trans->pnvm_loaded = true;
+ /* save payloads in several DRAM sections */
+ if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
+ ret = iwl_pcie_load_payloads_segments(trans,
+ dram_regions,
+ pnvm_payloads);
+ if (!ret)
+ trans->pnvm_loaded = true;
+ } else {
+ /* save only in one DRAM section */
+ ret = iwl_pcie_load_payloads_continuously
+ (trans,
+ pnvm_payloads,
+ &dram_regions->drams[0]);
+ if (!ret) {
+ dram_regions->n_regions = 1;
+ trans->pnvm_loaded = true;
+ }
}
return ret;
}
+static inline size_t
+iwl_dram_regions_size(const struct iwl_dram_regions *dram_regions)
+{
+ size_t total_size = 0;
+ int i;
+
+ for (i = 0; i < dram_regions->n_regions; i++)
+ total_size += dram_regions->drams[i].size;
+
+ return total_size;
+}
+
static void iwl_pcie_set_pnvm_segments(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
&trans_pcie->prph_scratch->ctrl_cfg;
+ struct iwl_dram_regions *dram_regions = &trans_pcie->pnvm_data;
prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
- cpu_to_le64(trans_pcie->pnvm_regions_desc_array.physical);
+ cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical);
+ prph_sc_ctrl->pnvm_cfg.pnvm_size =
+ cpu_to_le32(iwl_dram_regions_size(dram_regions));
}
static void iwl_pcie_set_continuous_pnvm(struct iwl_trans *trans)
@@ -425,9 +449,9 @@ static void iwl_pcie_set_continuous_pnvm(struct iwl_trans *trans)
&trans_pcie->prph_scratch->ctrl_cfg;
prph_sc_ctrl->pnvm_cfg.pnvm_base_addr =
- cpu_to_le64(trans_pcie->pnvm_dram[0].physical);
+ cpu_to_le64(trans_pcie->pnvm_data.drams[0].physical);
prph_sc_ctrl->pnvm_cfg.pnvm_size =
- cpu_to_le32(trans_pcie->pnvm_dram[0].size);
+ cpu_to_le32(trans_pcie->pnvm_data.drams[0].size);
}
void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
@@ -443,12 +467,18 @@ void iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans,
}
int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
- const struct iwl_pnvm_image *payloads)
+ const struct iwl_pnvm_image *payloads,
+ const struct iwl_ucode_capabilities *capa)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
&trans_pcie->prph_scratch->ctrl_cfg;
- struct iwl_dram_data *dram = &trans_pcie->reduce_power_dram;
+ struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data;
+ int ret = 0;
+
+ /* only allocate the DRAM if not allocated yet */
+ if (trans->reduce_power_loaded)
+ return 0;
if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
return 0;
@@ -456,26 +486,68 @@ int iwl_trans_pcie_ctx_info_gen3_load_reduce_power(struct iwl_trans *trans,
if (WARN_ON(prph_sc_ctrl->reduce_power_cfg.size))
return -EBUSY;
- /* only allocate the DRAM if not allocated yet */
- if (!trans->reduce_power_loaded)
- return iwl_pcie_load_payloads_continuously(trans,
- payloads,
- dram);
- return 0;
+ if (!payloads->n_chunks) {
+ IWL_DEBUG_FW(trans, "no payloads\n");
+ return -EINVAL;
+ }
+
+ /* save payloads in several DRAM sections */
+ if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG)) {
+ ret = iwl_pcie_load_payloads_segments(trans,
+ dram_regions,
+ payloads);
+ if (!ret)
+ trans->reduce_power_loaded = true;
+ } else {
+ /* save only in one DRAM section */
+ ret = iwl_pcie_load_payloads_continuously
+ (trans,
+ payloads,
+ &dram_regions->drams[0]);
+ if (!ret) {
+ dram_regions->n_regions = 1;
+ trans->reduce_power_loaded = true;
+ }
+ }
+
+ return ret;
}
-void iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans)
+static void iwl_pcie_set_reduce_power_segments(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
&trans_pcie->prph_scratch->ctrl_cfg;
+ struct iwl_dram_regions *dram_regions = &trans_pcie->reduced_tables_data;
- if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
- return;
+ prph_sc_ctrl->reduce_power_cfg.base_addr =
+ cpu_to_le64(dram_regions->prph_scratch_mem_desc.physical);
+ prph_sc_ctrl->reduce_power_cfg.size =
+ cpu_to_le32(iwl_dram_regions_size(dram_regions));
+}
+
+static void iwl_pcie_set_continuous_reduce_power(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl =
+ &trans_pcie->prph_scratch->ctrl_cfg;
prph_sc_ctrl->reduce_power_cfg.base_addr =
- cpu_to_le64(trans_pcie->reduce_power_dram.physical);
+ cpu_to_le64(trans_pcie->reduced_tables_data.drams[0].physical);
prph_sc_ctrl->reduce_power_cfg.size =
- cpu_to_le32(trans_pcie->reduce_power_dram.size);
+ cpu_to_le32(trans_pcie->reduced_tables_data.drams[0].size);
+}
+
+void
+iwl_trans_pcie_ctx_info_gen3_set_reduce_power(struct iwl_trans *trans,
+ const struct iwl_ucode_capabilities *capa)
+{
+ if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
+ return;
+
+ if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_FRAGMENTED_PNVM_IMG))
+ iwl_pcie_set_reduce_power_segments(trans);
+ else
+ iwl_pcie_set_continuous_reduce_power(trans);
}
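
A standalone model of what the segmented "set" path publishes to the firmware scratch area: the descriptor region's address plus the total payload size in bytes, summed across all regions exactly as iwl_dram_regions_size() does in the patch. The values are made up.

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct region { uint64_t physical; uint32_t size; };

static size_t total_size(const struct region *r, int n)
{
	size_t sum = 0;

	for (int i = 0; i < n; i++)
		sum += r[i].size;
	return sum;
}

int main(void)
{
	struct region drams[] = {
		{ .physical = 0x1000, .size = 512 },
		{ .physical = 0x4000, .size = 256 },
	};

	printf("size advertised to FW: %zu bytes\n",
	       total_size(drams, 2));	/* 768 */
	return 0;
}
```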
@@ -307,10 +307,9 @@ enum iwl_pcie_imr_status {
* @trans: pointer to the generic transport area
* @scd_base_addr: scheduler sram base address in SRAM
* @kw: keep warm address
- * @pnvm_dram: array of several DRAM areas that contains the PNVM data
- * @n_pnvm_regions: number of DRAM regions that were allocated for the pnvm
- * @pnvm_regions_desc_array: array of PNVM payloads addresses.
- * allocated in DRAM and sent to FW.
+ * @pnvm_data: holds info about pnvm payloads allocated in DRAM
+ * @reduced_tables_data: holds info about power reduced tables
+ * payloads allocated in DRAM
* @pci_dev: basic pci-network driver stuff
* @hw_base: pci hardware address support
* @ucode_write_complete: indicates that the ucode has been copied.
@@ -385,10 +384,8 @@ struct iwl_trans_pcie {
struct iwl_dma_ptr kw;
/* pnvm data */
- struct iwl_dram_data pnvm_dram[IPC_DRAM_MAP_ENTRY_NUM_MAX];
- u8 n_pnvm_regions;
- struct iwl_dram_data pnvm_regions_desc_array;
- struct iwl_dram_data reduce_power_dram;
+ struct iwl_dram_regions pnvm_data;
+ struct iwl_dram_regions reduced_tables_data;
struct iwl_txq *txq_memory;
@@ -485,8 +482,8 @@ struct iwl_trans
const struct pci_device_id *ent,
const struct iwl_cfg_trans_params *cfg_trans);
void iwl_trans_pcie_free(struct iwl_trans *trans);
-void iwl_trans_pcie_free_pnvm_dram(struct iwl_trans_pcie *trans_pcie,
- struct device *dev);
+void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
+ struct device *dev);
bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans);
#define _iwl_trans_pcie_grab_nic_access(trans) \
@@ -1993,25 +1993,27 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
}
-void iwl_trans_pcie_free_pnvm_dram(struct iwl_trans_pcie *trans_pcie,
- struct device *dev)
+void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
+ struct device *dev)
{
u8 i;
- struct iwl_dram_data *desc_dram = &trans_pcie->pnvm_regions_desc_array;
+ struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;
- for (i = 0; i < trans_pcie->n_pnvm_regions; i++) {
- dma_free_coherent(dev, trans_pcie->pnvm_dram[i].size,
- trans_pcie->pnvm_dram[i].block,
- trans_pcie->pnvm_dram[i].physical);
+ /* free DRAM payloads */
+ for (i = 0; i < dram_regions->n_regions; i++) {
+ dma_free_coherent(dev, dram_regions->drams[i].size,
+ dram_regions->drams[i].block,
+ dram_regions->drams[i].physical);
}
- trans_pcie->n_pnvm_regions = 0;
+ dram_regions->n_regions = 0;
+ /* free DRAM addresses array */
if (desc_dram->block) {
dma_free_coherent(dev, desc_dram->size,
desc_dram->block,
desc_dram->physical);
}
- desc_dram->block = NULL;
+ memset(desc_dram, 0, sizeof(*desc_dram));
}
void iwl_trans_pcie_free(struct iwl_trans *trans)
@@ -2046,13 +2048,10 @@ void iwl_trans_pcie_free(struct iwl_trans *trans)
iwl_pcie_free_fw_monitor(trans);
- iwl_trans_pcie_free_pnvm_dram(trans_pcie, trans->dev);
-
- if (trans_pcie->reduce_power_dram.size)
- dma_free_coherent(trans->dev,
- trans_pcie->reduce_power_dram.size,
- trans_pcie->reduce_power_dram.block,
- trans_pcie->reduce_power_dram.physical);
+ iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->pnvm_data,
+ trans->dev);
+ iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->reduced_tables_data,
+ trans->dev);
mutex_destroy(&trans_pcie->mutex);
iwl_trans_free(trans);
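
A last standalone model of the free helper's contract as rewritten above: zero state means "nothing to free", and the trailing memset() restores that state, so the helper can run unconditionally at teardown for both containers and again before a future reload. This assumes the containers start zeroed, as they do inside the kzalloc()ed transport.

```c
#include <stdlib.h>

struct regions { void *desc; int n; void *blocks[8]; };

static void free_regions(struct regions *r)
{
	for (int i = 0; i < r->n; i++)
		free(r->blocks[i]);
	r->n = 0;
	free(r->desc);		/* free(NULL) is a no-op */
	r->desc = NULL;		/* mirrors the patch's memset() reset */
}

int main(void)
{
	struct regions r = { 0 };	/* as-if kzalloc()ed */

	free_regions(&r);		/* safe before any load */
	free_regions(&r);		/* and idempotent */
	return 0;
}
```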