diff mbox series

[RFC,v2,49/96] cl8k: add pci.h

Message ID 20220524113502.1094459-50-viktor.barna@celeno.com
State New
Headers show
Series wireless: cl8k driver for Celeno IEEE 802.11ax devices | expand

Commit Message

Viktor Barna May 24, 2022, 11:34 a.m. UTC
From: Viktor Barna <viktor.barna@celeno.com>

(Part of the split. Please, take a look at the cover letter for more
details).

Signed-off-by: Viktor Barna <viktor.barna@celeno.com>
---
 drivers/net/wireless/celeno/cl8k/pci.h | 194 +++++++++++++++++++++++++
 1 file changed, 194 insertions(+)
 create mode 100644 drivers/net/wireless/celeno/cl8k/pci.h
diff mbox series

Patch

diff --git a/drivers/net/wireless/celeno/cl8k/pci.h b/drivers/net/wireless/celeno/cl8k/pci.h
new file mode 100644
index 000000000000..ec6801c7c71b
--- /dev/null
+++ b/drivers/net/wireless/celeno/cl8k/pci.h
@@ -0,0 +1,194 @@ 
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
+/* Copyright(c) 2019-2022, Celeno Communications Ltd. */
+
+#ifndef CL_PCI_H
+#define CL_PCI_H
+
+#include "tx.h"
+
+#define CE_INVALID_SN 0xFFFF
+
+struct cl_chip;
+
+/* Bus types the driver can operate over; only PCI is defined so far. */
+enum cl_bus_type {
+	CL_BUS_TYPE_PCI,
+};
+
+/*
+ * Bus-specific driver callbacks.
+ * @msg_fw_send: send a message to firmware; 'background' presumably selects a
+ *               non-blocking path - confirm against the implementations.
+ * @pkt_fw_send: push a packet descriptor to firmware on the given tx queue.
+ */
+struct cl_driver_ops {
+	int (*msg_fw_send)(struct cl_hw *cl_hw, const void *msg_params, bool background);
+	void (*pkt_fw_send)(struct cl_hw *cl_hw, struct cl_sw_txhdr *sw_txhdr,
+			    struct cl_tx_queue *tx_queue);
+};
+
+/* Information about one host buffer: local handle and its DMA address */
+struct cl_ipc_hostbuf {
+	ptrdiff_t hostid; /* Ptr to hostbuf client (ipc_host client) structure */
+	dma_addr_t dma_addr; /* Ptr to real hostbuf dma address */
+};
+
+/*
+ * Index in txdesc - updated by host on every push, used by firmware side.
+ * Keep this structure aligned to 4-byte.
+ */
+struct cl_ipc_txdesc_write_idx {
+	__le32 agg[IPC_MAX_BA_SESSIONS]; /* One index per block-ack session */
+	__le32 single[MAX_SINGLE_QUEUES]; /* One index per singleton queue */
+	__le32 bcmc; /* Broadcast/multicast queue index */
+};
+
+/*
+ * Ring indices shared between host and firmware.
+ * NOTE(review): fields are volatile-qualified, apparently because firmware
+ * updates this memory behind the host's back; kernel guidance
+ * (volatile-considered-harmful) prefers READ_ONCE()/WRITE_ONCE() at the
+ * access sites over volatile struct members - confirm intent.
+ */
+struct cl_ipc_ring_indices {
+	/* Last copy of ipc txdesc write desc right after DMA push operation */
+	volatile struct cl_ipc_txdesc_write_idx txdesc_write_idx;
+	/*
+	 * new start sn - equal to last acknowledged sequence number + 1.
+	 * Updated by firmware and used by host.
+	 */
+	volatile __le32 new_ssn_idx[IPC_MAX_BA_SESSIONS];
+	volatile __le32 dtim_count[MAX_BSS_NUM];
+	/* Index in rxdesc array, updated by firmware on every payload push, used by host */
+	volatile __le32 rxdesc_write_idx[CL_RX_BUF_MAX];
+	/* Index in rxdesc array, updated by host on rxdesc copy completion, used by firmware */
+	volatile __le32 rxdesc_read_idx[CL_RX_BUF_MAX];
+	/* BSR data counters */
+	volatile __le32 bsr_data_ctrs[TID_MAX];
+};
+
+/* Structure used to store shared TX ring indices and their DMA address */
+struct cl_ipc_ring_indices_elem {
+	struct cl_ipc_ring_indices *indices; /* Host virtual address */
+	dma_addr_t dma_addr; /* DMA address of 'indices' */
+};
+
+/* Host-side RX buffer bookkeeping */
+struct cl_ipc_host_rxbuf {
+	/* Array of drv desc which holds the skb and additional data */
+	ptrdiff_t **ipc_host_rxdesc_ptr;
+	/* Address of payload for embedded push operation (part of rxdesc data) */
+	u32 *dma_payload_addr;
+	/* Dma pointer to array of DMA payload addresses */
+	__le32 dma_payload_base_addr;
+};
+
+/*
+ * struct tx_queues_dma_addr - ipc layer queue addresses cast to DMA addresses
+ *
+ * The ipc layer points to arrays of txdesc; there are:
+ * 'IPC_MAX_BA_SESSIONS' arrays for aggregation queues
+ * 'MAX_SINGLE_QUEUES' arrays for singleton queues
+ * '1' array for the broadcast/multicast (bcmc) queue
+ *
+ * Each one of these arrays should be copied completely to the FW, therefore
+ * we should cast all of the arrays to DMA addresses.
+ */
+struct tx_queues_dma_addr {
+	__le32 agg[IPC_MAX_BA_SESSIONS];
+	__le32 single[MAX_SINGLE_QUEUES];
+	__le32 bcmc;
+};
+
+/* struct cl_ipc_tx_queues - ipc layer tx queues */
+struct cl_ipc_tx_queues {
+	struct txdesc *ipc_txdesc_agg[IPC_MAX_BA_SESSIONS]; /* Per-BA-session queues */
+	struct txdesc *ipc_txdesc_single[MAX_SINGLE_QUEUES]; /* Singleton queues */
+	struct txdesc *ipc_txdesc_bcmc; /* Broadcast/multicast queue */
+	/* Mapping of the TXQ's addresses to DMA addresses */
+	struct tx_queues_dma_addr *queues_dma_addr;
+	/* DMA address of tx_queues_dma_addr */
+	u32 dma_addr;
+};
+
+/*
+ * Host-side IPC environment: shared-memory mapping, ring indices, host
+ * buffer arrays for RX/radar/message/debug paths, TX queues, single-CFM
+ * bookkeeping and the deferred-work tasklets that service each path.
+ */
+struct cl_ipc_host_env {
+	/* Pointer to the shared environment */
+	struct cl_ipc_shared_env __iomem *shared;
+	/* TX ring indices (RD, WR idx & new_ssn) */
+	struct cl_ipc_ring_indices_elem *ring_indices_elem;
+	/* RX buffers (rxdesc & dma_addr) */
+	ptrdiff_t *ipc_host_rxdesc_rxm[IPC_RXBUF_CNT_RXM];
+	ptrdiff_t *ipc_host_rxdesc_fw[IPC_RXBUF_CNT_FW];
+	struct cl_ipc_host_rxbuf rx_hostbuf_array[CL_RX_BUF_MAX];
+	/* Host last read idx */
+	u32 host_rxdesc_read_idx[CL_RX_BUF_MAX];
+	/* Fields for Radar events handling */
+	struct cl_ipc_hostbuf radar_hostbuf_array[IPC_RADAR_BUF_CNT];
+	u8 radar_host_idx;
+	/* Fields for Emb->App MSGs handling */
+	struct cl_ipc_hostbuf e2a_msg_hostbuf_array[IPC_E2A_MSG_BUF_CNT];
+	u8 e2a_msg_host_idx;
+	/* Fields for Debug MSGs handling */
+	struct cl_ipc_hostbuf dbg_hostbuf_array[IPC_DBG_BUF_CNT];
+	u8 dbg_host_idx;
+	/* IPC queues */
+	struct cl_ipc_tx_queues tx_queues;
+	struct cl_ipc_enhanced_tim enhanced_tim;
+	/* Fields for single confirmation handling */
+	u8 *cfm_virt_base_addr;
+	dma_addr_t cfm_dma_base_addr;
+	/* Index used that points to the first used CFM */
+	u32 cfm_used_idx;
+	/* Tasklets */
+	struct tasklet_struct rxdesc_tasklet;
+	struct tasklet_struct tx_single_cfm_tasklet;
+	struct tasklet_struct tx_agg_cfm_tasklet;
+	struct tasklet_struct msg_tasklet;
+	struct tasklet_struct dbg_tasklet;
+	struct tasklet_struct bcn_tasklet;
+};
+
+/* Structure used to store information regarding Debug msg buffers in the driver */
+struct cl_dbg_elem {
+	struct cl_ipc_dbg_msg *dbgbuf_ptr; /* Host virtual address */
+	dma_addr_t dma_addr; /* DMA address of 'dbgbuf_ptr' */
+};
+
+/* DMA-mapped debug-info dump buffer and its access serialization */
+struct cl_debug_info {
+	struct mutex mutex; /* Serializes access to this structure */
+	struct dbg_info *buf; /* Host virtual address of the dump buffer */
+	dma_addr_t dma_addr; /* DMA address of 'buf' */
+	int bufsz; /* Size of 'buf' in bytes */
+	struct timespec64 trigger_tstamp; /* Time the dump was triggered */
+};
+
+/* One RX element: skb and the DMA address of its data buffer */
+struct cl_rx_elem {
+	int passed;
+	struct sk_buff *skb;
+	dma_addr_t dma_addr;
+};
+
+/* IPC lifecycle */
+int cl_ipc_init(struct cl_hw *cl_hw);
+void cl_ipc_recovery(struct cl_hw *cl_hw);
+void cl_ipc_deinit(struct cl_hw *cl_hw);
+void cl_ipc_stop(struct cl_hw *cl_hw);
+/* Allocation / push helpers returning buffers to firmware ownership */
+int cl_ipc_rx_elem_alloc(struct cl_hw *cl_hw, struct cl_rx_elem *rx_elem, u32 size);
+void cl_ipc_msgbuf_push(struct cl_ipc_host_env *ipc_env, ptrdiff_t hostid, dma_addr_t hostbuf);
+void cl_ipc_rxbuf_push(struct cl_ipc_host_env *ipc_env, struct cl_rx_elem *rx_elem,
+		       u32 rxdesc_read_idx, u32 host_read_idx, enum rx_buf_type type);
+void cl_ipc_radarbuf_push(struct cl_ipc_host_env *ipc_env, ptrdiff_t hostid, dma_addr_t hostbuf);
+void cl_ipc_dbgbuf_push(struct cl_ipc_host_env *ipc_env, ptrdiff_t hostid, dma_addr_t hostbuf);
+void cl_ipc_dbginfobuf_push(struct cl_ipc_host_env *ipc_env, dma_addr_t infobuf);
+
+/* Interrupt accounting */
+struct cl_irq_stats {
+	unsigned long last_rx; /* jiffies-style timestamps - TODO confirm units */
+	unsigned long last_tx;
+	unsigned long last_isr;
+	u32 last_isr_statuses;
+	u32 count_irq;
+	u32 ipc_success;
+};
+
+/* IRQ setup and masking */
+int cl_irq_request(struct cl_chip *chip);
+void cl_irq_free(struct cl_chip *chip);
+void cl_irq_status(struct cl_hw *cl_hw, u32 status);
+void cl_irq_enable(struct cl_hw *cl_hw, u32 value);
+void cl_irq_disable(struct cl_hw *cl_hw, u32 value);
+/* PCI implementations of the cl_driver_ops callbacks */
+int cl_msg_pci_msg_fw_send(struct cl_hw *cl_hw, const void *msg_params,
+			   bool background);
+/* RX descriptor path */
+void cl_rx_pci_init(struct cl_hw *cl_hw);
+void cl_rx_pci_deinit(struct cl_hw *cl_hw);
+void cl_rx_pci_desc_handler(struct cl_hw *cl_hw);
+void cl_rx_pci_desc_tasklet(unsigned long data);
+/* TX confirmation path */
+int cl_tx_release_skbs_from_cfm(struct cl_hw *cl_hw, u8 queue_idx, u16 new_ssn);
+void cl_tx_pci_single_cfm_tasklet(unsigned long data);
+void cl_tx_pci_agg_cfm_tasklet(unsigned long data);
+void cl_tx_pci_pkt_fw_send(struct cl_hw *cl_hw, struct cl_sw_txhdr *sw_txhdr,
+			   struct cl_tx_queue *tx_queue);
+
+#endif /* CL_PCI_H */