@@ -717,30 +717,19 @@ struct iwl_tfh_tfd {
/* Fixed (non-configurable) rx data from phy */
/**
- * struct iwlagn_scd_bc_tbl_entry - scheduler byte count table entry
+ * struct iwl_bc_tbl_entry - scheduler byte count table entry
* base physical address provided by SCD_DRAM_BASE_ADDR
* @tfd_offset:
* For devices up to 22000:
* 0-12 - tx command byte count
* 12-16 - station index
- * For 22000:
+ * For 22000:
* 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
+ * For AX210 and on:
+ * 0-14 - tx command byte count
+ * 14-16 - number of 64 byte chunks
*/
-struct iwlagn_scd_bc_tbl_entry {
- __le16 tfd_offset;
-} __packed;
-
-/**
- * struct iwl_gen3_bc_tbl_entry - scheduler byte count table entry gen3
- * For AX210 and on:
- * @tfd_offset: 0-12 - tx command byte count
- * 12-13 - number of 64 byte chunks
- * 14-16 - reserved
- */
-struct iwl_gen3_bc_tbl_entry {
+struct iwl_bc_tbl_entry {
__le16 tfd_offset;
} __packed;
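
The three layouts that now share this one __le16 can be sanity-checked in plain user-space C. A minimal sketch, assuming host byte order stands in for the kernel's cpu_to_le16 conversion; the helper names are hypothetical and exist only for illustration (the values are exercised in the worked example after the gen2 update hunk below):

#include <assert.h>
#include <stdint.h>

/* Devices up to 22000: bits 0-11 byte count, bits 12-15 station index. */
static uint16_t pack_bc_pre_22000(uint16_t byte_cnt, uint8_t sta_id)
{
	assert(byte_cnt <= 0xFFF && sta_id <= 0xF);
	return byte_cnt | ((uint16_t)sta_id << 12);
}

/* 22000: bits 0-11 byte count in dwords, bits 12-13 64-byte chunks. */
static uint16_t pack_bc_22000(uint16_t dword_cnt, uint8_t num_fetch_chunks)
{
	assert(dword_cnt <= 0xFFF && num_fetch_chunks <= 0x3);
	return dword_cnt | ((uint16_t)num_fetch_chunks << 12);
}

/* AX210 and on: bits 0-13 byte count, bits 14-15 64-byte chunks. */
static uint16_t pack_bc_ax210(uint16_t byte_cnt, uint8_t num_fetch_chunks)
{
	assert(byte_cnt <= 0x3FFF && num_fetch_chunks <= 0x3);
	return byte_cnt | ((uint16_t)num_fetch_chunks << 14);
}

Since every variant fits the same 16-bit slot, the two per-family structs collapse into one entry type and only the packing code needs to branch.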
@@ -3785,6 +3785,7 @@ iwl_trans_pcie_alloc(struct pci_dev *pdev,
{
struct iwl_trans_pcie *trans_pcie, **priv;
struct iwl_trans *trans;
+ unsigned int bc_tbl_n_entries;
int ret, addr_size;
u32 bar0;
@@ -3833,14 +3834,14 @@ iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- trans_pcie->txqs.bc_tbl_size =
- sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_BZ;
+ bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_BZ;
else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
- trans_pcie->txqs.bc_tbl_size =
- sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_AX210;
+ bc_tbl_n_entries = TFD_QUEUE_BC_SIZE_AX210;
else
- trans_pcie->txqs.bc_tbl_size =
- sizeof(struct iwlagn_scd_bc_tbl_entry) * TFD_QUEUE_BC_SIZE;
+ bc_tbl_n_entries = TFD_QUEUE_BC_SIZE;
+
+ trans_pcie->txqs.bc_tbl_size =
+ sizeof(struct iwl_bc_tbl_entry) * bc_tbl_n_entries;
/*
* For gen2 devices, we use a single allocation for each byte-count
* table, but they're pretty small (1k) so use a DMA pool that we
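
To illustrate what that comment is getting at, a hedged sketch of the pool setup; the pool name, field name, alignment, and error path are assumptions based on surrounding driver code, not part of this hunk. Each gen2 queue would then take one bc_tbl_size block from the pool rather than its own coherent allocation:

/* Sketch: one DMA pool whose block size is the per-queue table size
 * computed above; 256-byte alignment is an assumed requirement.
 */
trans_pcie->txqs.bc_pool =
	dmam_pool_create("iwlwifi:bc", trans->dev,
			 trans_pcie->txqs.bc_tbl_size, 256, 0);
if (!trans_pcie->txqs.bc_pool) {
	ret = -ENOMEM;
	goto out_free_trans;
}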
@@ -561,6 +561,7 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
int num_tbs)
{
int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
+ struct iwl_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.addr;
u8 filled_tfd_size, num_fetch_chunks;
u16 len = byte_cnt;
__le16 bc_ent;
@@ -581,19 +582,15 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
- struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
-
WARN_ON(len > 0x3FFF);
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
- scd_bc_tbl_gen3[idx].tfd_offset = bc_ent;
} else {
- struct iwlagn_scd_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.addr;
-
len = DIV_ROUND_UP(len, 4);
WARN_ON(len > 0xFFF);
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
- scd_bc_tbl[idx].tfd_offset = bc_ent;
}
+
+ scd_bc_tbl[idx].tfd_offset = bc_ent;
}
static u8 iwl_txq_gen2_get_num_tbs(struct iwl_tfh_tfd *tfd)
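
To make the two branches concrete with a worked example (the input values are illustrative only): for byte_cnt = 1000 and filled_tfd_size = 140, num_fetch_chunks = DIV_ROUND_UP(140, 64) - 1 = 2. On AX210 and on, bc_ent = cpu_to_le16(1000 | (2 << 14)) = cpu_to_le16(0x83E8); before AX210, len first becomes DIV_ROUND_UP(1000, 4) = 250 dwords, so bc_ent = cpu_to_le16(250 | (2 << 12)) = cpu_to_le16(0x20FA). Either way the result now flows into the single scd_bc_tbl[idx].tfd_offset store at the end of the function, which is what lets the per-family pointer declarations disappear from both branches.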
@@ -796,7 +796,7 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
return -ENOMEM;
}
-#define BC_TABLE_SIZE (sizeof(struct iwlagn_scd_bc_tbl_entry) * TFD_QUEUE_BC_SIZE)
+#define BC_TABLE_SIZE (sizeof(struct iwl_bc_tbl_entry) * TFD_QUEUE_BC_SIZE)
/*
* iwl_pcie_tx_alloc - allocate TX context
@@ -2067,7 +2067,7 @@ static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
int num_tbs)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwlagn_scd_bc_tbl_entry *scd_bc_tbl;
+ struct iwl_bc_tbl_entry *scd_bc_tbl;
int write_ptr = txq->write_ptr;
int txq_id = txq->id;
u8 sec_ctl = 0;
@@ -2314,7 +2314,7 @@ static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
int read_ptr)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwlagn_scd_bc_tbl_entry *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
+ struct iwl_bc_tbl_entry *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
int txq_id = txq->id;
u8 sta_id = 0;
__le16 bc_ent;
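
Both truncated gen1 hunks go on to write through the retyped pointer. A hedged sketch of that write follows; the flat indexing and the wrap-around mirror are assumptions drawn from the pre-existing gen1 code, not changes made by this patch:

/* Sketch: gen1 keeps every queue's entries in one flat array, so an
 * entry is addressed by queue ID and ring position.  The first
 * TFD_QUEUE_SIZE_BC_DUP positions are assumed to be mirrored past
 * TFD_QUEUE_SIZE_MAX so the scheduler can prefetch across the wrap.
 */
scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + read_ptr].tfd_offset = bc_ent;
if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
	scd_bc_tbl[txq_id * TFD_QUEUE_BC_SIZE + TFD_QUEUE_SIZE_MAX +
		   read_ptr].tfd_offset = bc_ent;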