@@ -13,3 +13,6 @@ config MSI_NONBROKEN
# or support it and have a good implementation. See commit
# 47d2b0f33c664533b8dbd5cb17faa8e6a01afe1f.
bool
+
+config CCIX_LIB
+ bool
@@ -13,3 +13,4 @@ common-obj-$(CONFIG_PCI) += ccix_per.o
common-obj-$(call lnot,$(CONFIG_PCI)) += pci-stub.o
common-obj-$(CONFIG_ALL) += pci-stub.o
+common-obj-$(CONFIG_CCIX_LIB) += ccix_lib.o
new file mode 100644
@@ -0,0 +1,1299 @@
+/*
+ * CCIX configuration space creation library
+ *
+ * Copyright (c) 2019 Huawei
+ * Author: Jonathan Cameron <Jonathan.Cameron@Huawei.com>
+ *
+ * Portions copied from pci-testdev.c
+ * Copyright (c) 2012 Red Hat Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "qemu/osdep.h"
+#include "hw/hw.h"
+#include "hw/pci/pci.h"
+#include "qemu/event_notifier.h"
+#include "sysemu/kvm.h"
+#include "hw/misc/ccix.h"
+
+/* Leave space for the SR-IDM and SW portal if enabled */
+#define CCIX_COMMON_CAP_MAX_SIZE (10 * sizeof(uint32_t))
+
+#define CM_CAP_DW1_MULTIPORT_CAP_OFF 0
+#define CM_CAP_DW1_MULTIPORT_CAP_M 0x00000007
+#define CM_CAP_DW1_VERSION_CAP_OFF 22
+#define CM_CAP_DW1_VERSION_CAP_M 0x00C00000
+#define CM_CAP_DW1_DEVID_OFF 24
+#define CM_CAP_DW1_DEVID_M 0xFF000000
+
+#define CM_CAP_DW2_DISC_READY_CAP_OFF 0
+#define CM_CAP_DW2_PART_CS_CAP_OFF 1
+#define CM_CAP_DW2_PORT_AG_CAP_OFF 2
+#define CM_CAP_DW2_CL_SIZE_CAP_OFF 3
+#define CM_CAP_DW2_ADDR_W_CAP_OFF 4
+#define CM_CAP_DW2_MH_CAP_OFF 7
+#define CM_CAP_DW2_SW_PORT_CAP_OFF 8
+#define CM_CAP_DW2_SAM_ALIGN_CAP_OFF 9
+#define CM_CAP_DW2_READY_TIME_VAL_OFF 19
+#define CM_CAP_DW2_READY_TIME_SCALE_OFF 28
+
+/* COMMON_CAP III is reserved */
+#define CM_CAP_DW4_DEV_ERR_LOG_OFFSET_OFF 20
+#define CM_CAP_DW5_IDM_OFFSET_OFF 20
+#define CM_CAP_DW6_RSAM_SIZE_OFF 0
+#define CM_CAP_DW6_RSAM_OFFSET_OFF 20
+#define CM_CAP_DW7_HSAM_SIZE_OFF 0
+#define CM_CAP_DW7_HSAM_OFFSET_OFF 20
+#define CM_CAP_DW8_SR_OFFSET_OFF 20
+#define CM_CAP_DW9_SW_PORTAL_OFF 0
+
+#define CM_CTL_DW1_DEVICE_EN_OFF 0
+#define CM_CTL_DW1_PRIMARY_PORT_EN_OFF 1
+#define CM_CTL_DW1_MESH_EN_OFF 2
+#define CM_CTL_DW1_PORT_AG_EN_OFF 4
+#define CM_CTL_DW1_IDM_TABLE_VALID_OFF 5
+#define CM_CTL_DW1_RSAM_TABLE_VALID_OFF 6
+#define CM_CTL_DW1_HSAM_TABLE_VALID_OFF 7
+#define CM_CTL_DW1_SW_PORT_ENABLE_OFF 8
+#define CM_CTL_DW1_ERR_AGENT_ID_OFF 16
+#define CM_CTL_DW1_ERR_AGENT_ID_M 0x003F0000
+#define CM_CTL_DW1_DEVID_OFF 24
+#define CM_CTL_DW1_DEVID_M 0xFF000000
+
+#define CM_CTL_DW2_PART_CS_EN_OFF 1
+#define CM_CTL_DW2_CL_SIZE_EN_OFF 3
+#define CM_CTL_DW2_ADDR_W_EN_OFF 4
+#define CM_CTL_DW2_ADDR_W_EN_M 0x00000070
+
+#define CCIX_PORT_CAP_SIZE (5 * sizeof(uint32_t))
+
+#define PT_CAP_DW1_DISC_READY_OFF 0
+#define PT_CAP_DW1_OPT_HEADER_OFF 1
+#define PT_CAP_DW1_P2P_FORWARDING_OFF 5
+#define PT_CAP_DW1_LINKS_OFF 7
+#define PT_CAP_DW1_LINKS_M 0x00001F80
+#define PT_CAP_DW1_PSAM_ENTRIES_OFF 13
+#define PT_CAP_DW1_PSAM_ENTRIES_M 0x0007E000
+#define PT_CAP_DW1_PORTID_OFF 27
+#define PT_CAP_DW1_PORTID_M 0xF8000000
+
+/* not including PSAM */
+#define CCIX_PORT_CTL_SIZE (5 * sizeof(uint32_t))
+
+#define PT_CTL_DW1_PORT_EN_OFF 0
+#define PT_CTL_DW1_OPT_HEADER_EN_OFF 1
+#define PT_CTL_DW1_LINKS_EN_OFF 7
+#define PT_CTL_DW1_LINKS_EN_M 0x00001F80
+#define PT_CTL_DW1_PSAM_ENTRIES_EN_OFF 13
+#define PT_CTL_DW1_PSAM_ENTRIES_EN_M 0x0007E000
+
+#define CCIX_LINK_CAP_SIZE (6 * sizeof(uint32_t))
+
+#define LK_CAP_DW1_DISC_READY_OFF 0
+#define LK_CAP_DW1_CREDIT_TYPE_OFF 1
+#define LK_CAP_DW1_MESSAGE_PACKING_OFF 2
+#define LK_CAP_DW1_NOCOMPACK_OFF 6
+#define LK_CAP_DW1_MAX_PKT_SIZE_OFF 7
+#define LK_CAP_DW1_MAX_PKT_SIZE_M 0x00000038
+#define LK_CAP_DW2_MAX_MEM_REQ_SEND_OFF 0
+#define LK_CAP_DW2_MAX_MEM_REQ_SEND_M 0x000003FF
+#define LK_CAP_DW2_MAX_SNP_REQ_SEND_OFF 10
+#define LK_CAP_DW2_MAX_SNP_REQ_SEND_M 0x000FFC00
+#define LK_CAP_DW2_MAX_DAT_REQ_SEND_OFF 20
+#define LK_CAP_DW2_MAX_DAT_REQ_SEND_M 0x3FF00000
+#define LK_CAP_DW3_MAX_MEM_REQ_RECV_OFF 0
+#define LK_CAP_DW3_MAX_MEM_REQ_RECV_M 0x000003FF
+#define LK_CAP_DW3_MAX_SNP_REQ_RECV_OFF 10
+#define LK_CAP_DW3_MAX_SNP_REQ_RECV_M 0x000FFC00
+#define LK_CAP_DW3_MAX_DAT_REQ_RECV_OFF 20
+#define LK_CAP_DW3_MAX_DAT_REQ_RECV_M 0x3FF00000
+#define LK_CAP_DW4_MAX_MISC_REQ_SEND_CAP_OFF 0
+#define LK_CAP_DW4_MAX_MISC_REQ_RECV_CAP_OFF 10
+
+/* Not including per link */
+#define CCIX_LINK_CTL_SIZE (1 * sizeof(uint32_t))
+/* Not including BcastFwdCntlVctr, including Transport ID map */
+#define CCIX_LINK_CTL_PER_LINK_SIZE ((6 + 1) * sizeof(uint32_t))
+
+/* Per link controls */
+#define LK_CTL_DW1_LINK_EN_OFF 0
+#define LK_CTL_DW1_CREDIT_EN_OFF 1
+#define LK_CTL_DW1_MESSAGE_PACKING_EN_OFF 2
+#define LK_CTL_DW1_NO_COMP_ACK_EN_OFF 6
+#define LK_CTL_DW1_MAX_PKT_SIZE_EN_OFF 7
+#define LK_CTL_DW1_MAX_PKT_SIZE_EN_M 0x00000380
+#define LK_CTL_DW1_LINK_ENT_ADDR_TYPE_OFF 10
+
+#define CCIX_RA_CAP_SIZE (3 * sizeof(uint32_t))
+
+#define RA_CAP_DW1_RA_DISC_RDY_STAT_OFF 0
+#define RA_CAP_DW1_RA_CACHE_FLUSH_TIME_VALUE_OFF 19
+#define RA_CAP_DW1_RA_CACHE_FLUSH_TIME_VALUE_M 0x0FF80000
+#define RA_CAP_DW1_RA_CACHE_FLUSH_TIME_SCALE_OFF 28
+#define RA_CAP_DW1_RA_CACHE_FLUSH_TIME_SCALE_M 0x70000000
+#define RA_CAP_DW1_RA_CACHE_FLUSH_STA_OFF 31
+
+#define RA_CAP_DW2_RA_ERROR_LOG_OFFSET_OFF 20
+
+#define CCIX_RA_CTL_SIZE (4 * sizeof(uint32_t))
+
+#define RA_CTL_DW1_EN_OFF 0
+#define RA_CTL_DW1_SNOOP_RESP_EN_OFF 1
+#define RA_CTL_DW1_CACHE_FLUSH_EN_OFF 14
+#define RA_CTL_DW1_CACHE_EN_OFF 15
+#define RA_CTL_DW1_RAID_OFF 26
+#define RA_CTL_DW1_RAID_M 0xFC000000
+
+/* Excluding pool entries */
+#define CCIX_HA_CAP_SIZE (3 * sizeof(uint32_t))
+
+#define HA_CAP_DW1_HA_DISC_RD_STAT_OFF 0
+#define HA_CAP_DW1_NUM_HA_IDS_OFF 1
+#define HA_CAP_DW1_HA_MEM_POOL_CAP_OFF 8
+#define HA_CAP_DW1_HA_QACK_OFF 14
+#define HA_CAP_DW1_HA_HW_QACK_CAP_OFF 15
+#define HA_CAP_DW1_HA_MEM_EXP_CAP_OFF 16
+#define HA_CAP_DW1_HA_EVICT_HINT_CAP_OFF 17
+#define HA_CAP_DW1_HA_WRITE_EVICT_FULL_HIT_CAP_OFF 18
+#define HA_CAP_DW1_MEM_POOL_READY_TIME_VALUE_OFF 19
+#define HA_CAP_DW1_MEM_POOL_READY_TIME_VALUE_M 0x0FF80000
+#define HA_CAP_DW1_MEM_POOL_READY_TIME_SCALE_OFF 28
+#define HA_CAP_DW1_MEM_POOL_READY_TIME_SCALE_M 0x70000000
+#define HA_CAP_DW1_MEM_POOL_READY_STA_OFF 31
+
+#define HA_CAP_DW2_HA_ERROR_LOG_OFF 20
+
+#define MEM_POOL_CAP_DW1_READY_STA_OFF 0
+#define MEM_POOL_CAP_DW1_GEN_MEM_TYPE_OFF 1
+enum {
+ mem_type_other = 0,
+ mem_type_expan,
+ mem_type_hole,
+ mem_type_rom,
+ mem_type_volatile,
+ mem_type_non_volatile,
+ mem_type_device,
+};
+
+#define MEM_POOL_CAP_DW1_SPEC_MEM_TYPE_OFF 4
+enum {
+ mem_spec_other = 0,
+ mem_spec_sram,
+ mem_spec_ddr,
+ mem_spec_nvdimm_f,
+ mem_spec_nvdimm_n,
+ mem_spec_hbm,
+ mem_spec_flash,
+};
+#define MEM_POOL_CAP_DW1_ADDR_CAP_OFF 7
+#define MEM_POOL_CAP_DW1_MEM_ATTR_OFF 8
+#define MEM_POOL_CAP_DW1_MEM_ATTR_DEV 0x0
+#define MEM_POOL_CAP_DW1_MEM_ATTR_NONCACHE 0x4
+#define MEM_POOL_CAP_DW1_MEM_ATTR_NORMAL 0x5
+#define MEM_POOL_CAP_DW1_MEM_EXT_ATTR_OFF 11
+#define MEM_POOL_CAP_DW1_MEM_EXT_ATTR_NONE 0x0
+#define MEM_POOL_CAP_DW1_MEM_EXT_ATTR_PRIVATE 0x1
+#define MEM_POOL_CAP_DW1_MEM_POOL_SIZE_L_CAP_OFF 16
+
+#define MEM_POOL_CAP_DW2_MEM_POOL_SIZE_H_CAP_OFF 0
+
+/* Not including HAID or HBAT entries */
+#define CCIX_HA_CTL_SIZE (6 * sizeof(uint32_t))
+#define CCIX_HAID_ENTRY_SIZE (1 * sizeof(uint32_t))
+#define CCIX_BAT_ENTRY_SIZE (2 * sizeof(uint32_t))
+
+#define CCIX_POOL_CAP_SIZE (2 * sizeof(uint32_t))
+#define CCIX_GUID_SIZE (5 * sizeof(uint32_t))
+#define CCIX_SAM_ENTRY_SIZE (3 * sizeof(uint32_t))
+#define CCIX_GUID_DW0 0xC3CB993B
+#define CCIX_GUID_DW1 0x02C4436F
+#define CCIX_GUID_DW2 0x9B68D271
+#define CCIX_GUID_DW3 0xF2E8CA31
+#define CCIX_GUID_DW4_VERSION_OFF 0
+
+/* HACK - What is the best way to do this in Qemu? */
+/*
+ * Global registry of every CCIX-capable PCI function, used to mirror
+ * device-wide control writes (enable, DEVID) between sibling functions.
+ * NOTE(review): index meaning is not shown here (presumably devfn) and
+ * access is unsynchronized — confirm single-threaded config access.
+ */
+CCIXState *CCIXFuncs[256];
+
+/* Callback invoked when the guest writes the dword at @offset;
+ * @cap_start_offset locates the enclosing capability (0 when unused). */
+typedef void (*am_cb_t)(PCIDevice *d, CCIXState *s, uint16_t offset,
+                        uint16_t cap_start_offset, uint32_t val);
+/* Address-map entry: keyed by the config-space @offset of one dword. */
+struct am {
+    uint16_t offset;
+    uint16_t cap_start_offset;
+    am_cb_t am_cb;
+};
+
+/* GTree key comparator: keys are the uint16_t config-space offsets. */
+static int am_cmp(const void *ap, const void *bp)
+{
+    const uint16_t *lhs = ap;
+    const uint16_t *rhs = bp;
+
+    return *lhs - *rhs;
+}
+
+/*
+ * Prepare a CCIXState for use: record the owning PCI device and create
+ * the (initially empty) address-map tree that dispatches config writes.
+ */
+void initialize_ccixstate(CCIXState *s, PCIDevice *d)
+{
+    s->pci_dev = d;
+    s->am_tree = g_tree_new(am_cmp);
+}
+
+/*
+ * Register @cb as the write handler for the dword at config-space
+ * @offset.  @cap_start_offset is passed back to the handler so it can
+ * find the start of the owning capability structure (0 when unused).
+ */
+static void am_table_add(PCIDevice *pci_dev, CCIXState *s, uint16_t offset,
+                         uint16_t cap_start_offset, am_cb_t cb)
+{
+    struct am *entry = g_malloc0(sizeof *entry);
+
+    /*
+     * g_malloc0() aborts on allocation failure and never returns NULL,
+     * so the old "if (entry)" guard was dead code — and, had it ever
+     * triggered, it would have silently dropped the registration.
+     */
+    entry->offset = offset;
+    entry->cap_start_offset = cap_start_offset;
+    entry->am_cb = cb;
+    g_tree_insert(s->am_tree, &entry->offset, entry);
+}
+
+/*
+ * Config-space write hook: if a handler was registered for @addr via
+ * am_table_add(), invoke it with the written value.  @l (write length)
+ * is currently ignored; only dword-aligned handlers are registered.
+ *
+ * The lookup key must be a uint16_t: am_cmp() dereferences the key
+ * pointers as uint16_t, so passing a pointer to a uint64_t only worked
+ * by accident on little-endian hosts and always failed on big-endian.
+ */
+void ccix_write_config(PCIDevice *pci_dev, CCIXState *s, uint32_t addr, uint32_t val_in, int l)
+{
+    uint16_t key = addr;
+    struct am *entry = g_tree_lookup(s->am_tree, &key);
+
+    /* Re-check against the full addr in case of truncation above. */
+    if (entry && entry->offset == addr) {
+        entry->am_cb(pci_dev, s, addr, entry->cap_start_offset, val_in);
+    }
+}
+
+#define CCIX_COMP_ID_GENERAL 0x0000
+#define CCIX_COMP_ID_TDL_DVSEC 0x0001
+#define CCIX_COMP_ID_PRL_DVSEC 0x0002
+#define CCIX_COMP_ID_PRL_COMMON 0x0003
+#define CCIX_COMP_ID_PRL_PORT 0x0004
+#define CCIX_COMP_ID_PRL_LINK 0x0005
+#define CCIX_COMP_ID_HA 0x0006
+#define CCIX_COMP_ID_RA 0x0008
+#define CCIX_COMP_ID_SA 0x000A
+
+#define CCIX_DVSEC_CAP_POS_OFFSET 0x0C
+#define CCIX_DVSEC_CTR_POS_OFFSET 0x10
+
+/*
+ * Fill the capability- and control-pointer dwords of a CCIX DVSEC
+ * header: size in the low bits, dword offset in bits 31:20.
+ *
+ * Use pci_set_long() instead of storing through a cast uint32_t *:
+ * config space is little-endian by definition, so the raw host-endian
+ * store was wrong on big-endian hosts (and potentially misaligned).
+ * The offsets are also widened before the << 20 shift, which otherwise
+ * overflows the promoted int for large uint16_t values.
+ */
+static void ccix_dvsec_fill_dvsec_header(PCIDevice *pci_dev, uint16_t offset,
+                                         uint16_t cap_size,
+                                         uint16_t cap_offset,
+                                         uint16_t control_size,
+                                         uint16_t control_offset)
+{
+    pci_set_long(pci_dev->config + offset + CCIX_DVSEC_CAP_POS_OFFSET,
+                 cap_size | ((uint32_t)cap_offset << 20));
+    pci_set_long(pci_dev->config + offset + CCIX_DVSEC_CTR_POS_OFFSET,
+                 control_size | ((uint32_t)control_offset << 20));
+}
+
+/*
+ * Write a CCIX capability header dword: component ID in bits 15:0,
+ * version in bits 19:16, next-capability dword offset in bits 31:20.
+ * pci_set_long() keeps config space little-endian on any host; the
+ * offset is widened before shifting to avoid int-promotion overflow.
+ */
+static void ccix_fill_cap_header(PCIDevice *pci_dev, uint16_t cap_offset,
+                                 uint16_t ccix_comp, uint16_t next_offset)
+{
+    uint8_t ver = 1;
+
+    pci_set_long(pci_dev->config + cap_offset,
+                 ccix_comp | (ver << 16) | ((uint32_t)next_offset << 20));
+}
+
+/*
+ * Write handler for the RA (Request Agent) control DW1 register.
+ * Decodes the requested enable bits and RAID, logs any transitions,
+ * and stores back only the bits this model supports.  A cache-flush
+ * enable change immediately reflects completion in RACapStat1
+ * (cap_start + 0x4) — flushes are modelled as instantaneous.
+ */
+static void ra_ctl_dw1_set(PCIDevice *pci_dev, CCIXState *s, uint16_t offset,
+                           uint16_t cap_start, uint32_t req_val)
+{
+    uint32_t cur_val, new_val = 0;
+    bool ra_en_cur, ra_en_req;
+    uint32_t capval;
+    bool ra_snoop_resp_en_cur, ra_snoop_resp_en_req;
+    bool ra_cache_flush_en_cur, ra_cache_flush_en_req;
+    bool ra_cache_en_cur, ra_cache_en_req;
+    uint8_t raid_cur, raid_req;
+
+    cur_val = pci_get_long(pci_dev->config + offset);
+    ra_en_cur = cur_val & (1 << RA_CTL_DW1_EN_OFF);
+    ra_en_req = req_val & (1 << RA_CTL_DW1_EN_OFF);
+
+    if (ra_en_cur != ra_en_req)
+        printf("Changing RA Enable to %d\n", ra_en_req);
+
+    if (ra_en_req)
+        new_val = (1 << RA_CTL_DW1_EN_OFF);
+
+    ra_snoop_resp_en_cur = cur_val & (1 << RA_CTL_DW1_SNOOP_RESP_EN_OFF);
+    ra_snoop_resp_en_req = req_val & (1 << RA_CTL_DW1_SNOOP_RESP_EN_OFF);
+
+    if (ra_snoop_resp_en_cur != ra_snoop_resp_en_req)
+        printf("Changing RA Snoop Resp Enabled to %d\n",
+               ra_snoop_resp_en_req);
+
+    if (ra_snoop_resp_en_req)
+        new_val |= (1 << RA_CTL_DW1_SNOOP_RESP_EN_OFF);
+
+    ra_cache_flush_en_cur = cur_val & (1 << RA_CTL_DW1_CACHE_FLUSH_EN_OFF);
+    ra_cache_flush_en_req = req_val & (1 << RA_CTL_DW1_CACHE_FLUSH_EN_OFF);
+    if (ra_cache_flush_en_cur != ra_cache_flush_en_req) {
+        printf("Enabling or disabling a cache flush %d\n",
+               ra_cache_flush_en_req);
+
+        /* Make the cache flush status update instantaneous for now */
+        capval = pci_get_long(pci_dev->config + cap_start + 0x4);
+        if (ra_cache_flush_en_req)
+            capval |= (1 << RA_CAP_DW1_RA_CACHE_FLUSH_STA_OFF);
+        else
+            capval &= ~(1 << RA_CAP_DW1_RA_CACHE_FLUSH_STA_OFF);
+        pci_set_long(pci_dev->config + cap_start + 0x4, capval);
+    }
+    if (ra_cache_flush_en_req)
+        new_val |= (1 << RA_CTL_DW1_CACHE_FLUSH_EN_OFF);
+
+    ra_cache_en_cur = cur_val & (1 << RA_CTL_DW1_CACHE_EN_OFF);
+    ra_cache_en_req = req_val & (1 << RA_CTL_DW1_CACHE_EN_OFF);
+    if (ra_cache_en_cur != ra_cache_en_req)
+        printf("Enabling or disabling the RA cache %d\n", ra_cache_en_req);
+
+    if (ra_cache_en_req)
+        new_val |= (1 << RA_CTL_DW1_CACHE_EN_OFF);
+
+    /* RAID is a 6-bit field; mask after shifting back into place. */
+    raid_cur = (cur_val & RA_CTL_DW1_RAID_M) >> RA_CTL_DW1_RAID_OFF;
+    raid_req = (req_val & RA_CTL_DW1_RAID_M) >> RA_CTL_DW1_RAID_OFF;
+    if (raid_cur != raid_req)
+        printf("RA ID changing from %u to %u\n", raid_cur, raid_req);
+
+    new_val |= (raid_req << RA_CTL_DW1_RAID_OFF) & RA_CTL_DW1_RAID_M;
+
+    pci_set_long(pci_dev->config + offset, new_val);
+}
+
+
+/*
+ * Write handler for ComnCntl1 (common control DW1).
+ *
+ * Handles device enable, primary port enable, IDM/RSAM table-valid
+ * bits, the error agent ID (EAID) and the CCIX device ID.  Device
+ * enable and DEVID changes are mirrored into the ComnCntl1 of every
+ * other function in CCIXFuncs[] sharing this function's ccix_dev_name,
+ * keeping all functions of a multi-function CCIX device in sync.
+ */
+static void comncntl1_set(PCIDevice *pci_dev, CCIXState *s, uint16_t offset,
+                          uint16_t cap_start, uint32_t req_val)
+{
+    uint32_t cur_val, new_val = 0;
+    bool dev_en_req, dev_en_cur;
+    bool pp_en_req, pp_en_cur;
+    bool id_val_req, id_val_cur;
+    bool rsam_val_req, rsam_val_cur;
+    uint8_t eaid_req, eaid_cur;
+    uint8_t devid_req, devid_cur;
+    int i;
+    uint32_t capval;
+
+    cur_val = pci_get_long(pci_dev->config + offset);
+
+    dev_en_req = req_val & (1 << CM_CTL_DW1_DEVICE_EN_OFF);
+    dev_en_cur = cur_val & (1 << CM_CTL_DW1_DEVICE_EN_OFF);
+    if (dev_en_req != dev_en_cur) {
+        printf("Dev enable changing to %d\n", dev_en_req);
+        /*
+         * Mirror the enable to sibling functions.  Read with
+         * pci_get_long(): the register is 32 bits wide, and the old
+         * pci_get_word() read-back zeroed the sibling's EAID upper
+         * bits and DEVID field on write.
+         * NOTE(review): the bit is OR-ed in unconditionally, so
+         * disabling never propagates — confirm intended behaviour.
+         */
+        if (s->ccix_dev_name) {
+            for (i = 0; i < sizeof(CCIXFuncs) / sizeof(*CCIXFuncs); i++) {
+                if (!CCIXFuncs[i] || !CCIXFuncs[i]->ccix_dev_name)
+                    continue;
+                if (CCIXFuncs[i] != s && !strcmp(CCIXFuncs[i]->ccix_dev_name, s->ccix_dev_name)) {
+                    pci_set_long(CCIXFuncs[i]->pci_dev->config + CCIXFuncs[i]->enable_offset,
+                                 pci_get_long(CCIXFuncs[i]->pci_dev->config + CCIXFuncs[i]->enable_offset) | 0x1);
+                }
+            }
+        }
+    }
+    if (dev_en_req)
+        new_val |= (1 << CM_CTL_DW1_DEVICE_EN_OFF);
+
+    pp_en_req = req_val & (1 << CM_CTL_DW1_PRIMARY_PORT_EN_OFF);
+    pp_en_cur = cur_val & (1 << CM_CTL_DW1_PRIMARY_PORT_EN_OFF);
+    if (pp_en_req != pp_en_cur)
+        printf("Primary port enable changing to %d\n", pp_en_req);
+    if (pp_en_req)
+        new_val |= (1 << CM_CTL_DW1_PRIMARY_PORT_EN_OFF);
+    /* NOT DOING MESH ENABLE FOR NOW */
+    /* NOT DOING PORT AG FOR NOW */
+
+    id_val_req = req_val & (1 << CM_CTL_DW1_IDM_TABLE_VALID_OFF);
+    id_val_cur = cur_val & (1 << CM_CTL_DW1_IDM_TABLE_VALID_OFF);
+    if (id_val_req != id_val_cur)
+        printf("Validity of IDM changing to %d\n", id_val_req);
+    if (id_val_req)
+        new_val |= (1 << CM_CTL_DW1_IDM_TABLE_VALID_OFF);
+
+    rsam_val_req = req_val & (1 << CM_CTL_DW1_RSAM_TABLE_VALID_OFF);
+    rsam_val_cur = cur_val & (1 << CM_CTL_DW1_RSAM_TABLE_VALID_OFF);
+    if (rsam_val_req != rsam_val_cur)
+        printf("RSAM valid changing to %d\n", rsam_val_req);
+    if (rsam_val_req)
+        new_val |= (1 << CM_CTL_DW1_RSAM_TABLE_VALID_OFF);
+    /* NOT DOING HSAM FOR NOW */
+    /* NOT DOING SW SERVICES PORTAL FOR NOW */
+
+    eaid_req = (req_val & CM_CTL_DW1_ERR_AGENT_ID_M) >>
+               CM_CTL_DW1_ERR_AGENT_ID_OFF;
+    eaid_cur = (cur_val & CM_CTL_DW1_ERR_AGENT_ID_M) >>
+               CM_CTL_DW1_ERR_AGENT_ID_OFF;
+    if (eaid_req != eaid_cur)
+        printf("EAID for device changing from %d to %d\n",
+               eaid_cur, eaid_req);
+    new_val |= eaid_req << CM_CTL_DW1_ERR_AGENT_ID_OFF;
+
+    devid_req = (req_val & CM_CTL_DW1_DEVID_M) >> CM_CTL_DW1_DEVID_OFF;
+    devid_cur = (cur_val & CM_CTL_DW1_DEVID_M) >> CM_CTL_DW1_DEVID_OFF;
+    if (devid_req != devid_cur) {
+        printf("DEVID changing from %d to %d, updating status\n",
+               devid_cur, devid_req);
+
+        /* Reflect the new DEVID in this function's ComnCapStat1... */
+        capval = pci_get_long(pci_dev->config + cap_start + 0x4);
+        capval &= ~CM_CAP_DW1_DEVID_M;
+        capval |= (uint32_t)devid_req << CM_CAP_DW1_DEVID_OFF;
+        pci_set_long(pci_dev->config + cap_start + 4, capval);
+
+        /* ...and in the ComnCntl1 of each sibling function. */
+        if (s->ccix_dev_name) {
+            for (i = 0; i < sizeof(CCIXFuncs) / sizeof(*CCIXFuncs); i++) {
+                if (!CCIXFuncs[i] || !CCIXFuncs[i]->ccix_dev_name)
+                    continue;
+                if (CCIXFuncs[i] != s && !strcmp(CCIXFuncs[i]->ccix_dev_name, s->ccix_dev_name)) {
+                    capval = pci_get_long(CCIXFuncs[i]->pci_dev->config + CCIXFuncs[i]->enable_offset);
+                    capval &= ~CM_CAP_DW1_DEVID_M;
+                    capval |= (uint32_t)devid_req << CM_CAP_DW1_DEVID_OFF;
+                    pci_set_long(CCIXFuncs[i]->pci_dev->config + CCIXFuncs[i]->enable_offset, capval);
+                }
+            }
+        }
+    }
+    new_val |= (uint32_t)devid_req << CM_CTL_DW1_DEVID_OFF;
+
+    pci_set_long(pci_dev->config + offset, new_val);
+}
+
+/*
+ * Write handler for ComnCntl2 (common control DW2).
+ * Only the partial-cacheline enable and address-width fields are
+ * modelled; everything else is dropped from the stored value.
+ */
+static void comncntl2_set(PCIDevice *pci_dev, CCIXState *s, uint16_t offset,
+                          uint16_t cap_start, uint32_t req_val)
+{
+    uint32_t cur_val, new_val = 0;
+    bool partial_cur, partial_req;
+    uint32_t addrw_cur, addrw_req;
+
+    cur_val = pci_get_long(pci_dev->config + offset);
+
+    partial_cur = cur_val & (1 << CM_CTL_DW2_PART_CS_EN_OFF);
+    partial_req = req_val & (1 << CM_CTL_DW2_PART_CS_EN_OFF);
+    if (partial_cur != partial_req)
+        printf("Changing Partial Cache enable to %d\n", partial_req);
+    if (partial_req)
+        new_val |= (1 << CM_CTL_DW2_PART_CS_EN_OFF);
+    /* Cacheline size 128 not yet supported */
+
+    addrw_cur = (cur_val & CM_CTL_DW2_ADDR_W_EN_M) >> CM_CTL_DW2_ADDR_W_EN_OFF;
+    addrw_req = (req_val & CM_CTL_DW2_ADDR_W_EN_M) >> CM_CTL_DW2_ADDR_W_EN_OFF;
+    /* No sanity checking yet */
+    if (addrw_cur != addrw_req)
+        printf("Changing Address Width to %d\n", addrw_req);
+    /*
+     * OR the field in: the previous plain assignment discarded the
+     * partial-cacheline enable bit accumulated above.
+     */
+    new_val |= (addrw_req << CM_CTL_DW2_ADDR_W_EN_OFF);
+
+    pci_set_long(pci_dev->config + offset, new_val);
+}
+
+/*
+ * Write handler for the Port control DW1 register: port enable,
+ * optimized header enable, number of enabled links and number of
+ * enabled PSAM entries.  Transitions are logged; only the modelled
+ * fields are stored back.
+ */
+static void port_ctl_dw1_set(PCIDevice *pci_dev, CCIXState *s, uint16_t offset,
+                             uint16_t cap_start, uint32_t req_val)
+{
+    uint32_t cur_val;
+    uint32_t new_val = 0;
+    bool en_req, en_cur;
+    bool opt_req, opt_cur;
+    uint32_t links_req, links_cur;
+    uint32_t psam_req, psam_cur;
+
+    cur_val = pci_get_long(pci_dev->config + offset);
+    en_cur = cur_val & (1 << PT_CTL_DW1_PORT_EN_OFF);
+    en_req = req_val & (1 << PT_CTL_DW1_PORT_EN_OFF);
+    if (en_cur != en_req)
+        printf("Enabling of port changing to %d\n", en_req);
+    if (en_req)
+        new_val |= (1 << PT_CTL_DW1_PORT_EN_OFF);
+
+    opt_cur = cur_val & (1 << PT_CTL_DW1_OPT_HEADER_EN_OFF);
+    opt_req = req_val & (1 << PT_CTL_DW1_OPT_HEADER_EN_OFF);
+    if (opt_cur != opt_req)
+        printf("Enabling of optimized header changing to %d\n", opt_req);
+    if (opt_req)
+        new_val |= (1 << PT_CTL_DW1_OPT_HEADER_EN_OFF);
+
+    /* 6-bit link-enable count field */
+    links_cur = (cur_val & PT_CTL_DW1_LINKS_EN_M) >> PT_CTL_DW1_LINKS_EN_OFF;
+    links_req = (req_val & PT_CTL_DW1_LINKS_EN_M) >> PT_CTL_DW1_LINKS_EN_OFF;
+    if (links_cur != links_req)
+        printf("Number of enabled links changing from %d to %d\n",
+               links_cur, links_req);
+    new_val |= (links_req << PT_CTL_DW1_LINKS_EN_OFF);
+
+    /* 6-bit PSAM entry count field */
+    psam_cur = (cur_val & PT_CTL_DW1_PSAM_ENTRIES_EN_M) >>
+               PT_CTL_DW1_PSAM_ENTRIES_EN_OFF;
+    psam_req = (req_val & PT_CTL_DW1_PSAM_ENTRIES_EN_M) >>
+               PT_CTL_DW1_PSAM_ENTRIES_EN_OFF;
+    if (psam_cur != psam_req)
+        printf("Number of psam entries changing from %d to %d\n",
+               psam_cur, psam_req);
+    new_val |= (psam_req << PT_CTL_DW1_PSAM_ENTRIES_EN_OFF);
+
+    pci_set_long(pci_dev->config + offset, new_val);
+}
+
+/*
+ * Write handler for LinkAttrCntl (link control DW1): link enable,
+ * credit enable, message packing, no-CompAck, max packet size and the
+ * link entry address type.  Transitions are logged; only modelled
+ * fields are stored back.
+ */
+static void lk_ctl_dw1_set(PCIDevice *pci_dev, CCIXState *s, uint16_t offset,
+                           uint16_t cap_start, uint32_t req_val)
+{
+    uint32_t cur_val;
+    uint32_t new_val = 0;
+    bool en_cur, en_req;
+    bool cred_en_cur, cred_en_req;
+    bool mpack_en_cur, mpack_en_req;
+    bool nocompack_cur, nocompack_req;
+    uint32_t maxpkt_cur, maxpkt_req;
+    bool addr_type_cur, addr_type_req;
+
+    cur_val = pci_get_long(pci_dev->config + offset);
+
+    en_cur = cur_val & (1 << LK_CTL_DW1_LINK_EN_OFF);
+    en_req = req_val & (1 << LK_CTL_DW1_LINK_EN_OFF);
+    if (en_cur != en_req)
+        printf("Changing link enabled status to %d\n", en_req);
+    if (en_req)
+        new_val |= (1 << LK_CTL_DW1_LINK_EN_OFF);
+
+    cred_en_cur = cur_val & (1 << LK_CTL_DW1_CREDIT_EN_OFF);
+    cred_en_req = req_val & (1 << LK_CTL_DW1_CREDIT_EN_OFF);
+    if (cred_en_cur != cred_en_req)
+        /* Log the *requested* value (was cred_en_cur, a copy-paste slip) */
+        printf("Changing link credit enable status to %d\n", cred_en_req);
+    if (cred_en_req)
+        new_val |= (1 << LK_CTL_DW1_CREDIT_EN_OFF);
+
+    mpack_en_cur = cur_val & (1 << LK_CTL_DW1_MESSAGE_PACKING_EN_OFF);
+    mpack_en_req = req_val & (1 << LK_CTL_DW1_MESSAGE_PACKING_EN_OFF);
+    if (mpack_en_cur != mpack_en_req)
+        printf("Changing message packing enable to %d\n", mpack_en_req);
+    if (mpack_en_req)
+        new_val |= (1 << LK_CTL_DW1_MESSAGE_PACKING_EN_OFF);
+    nocompack_cur = cur_val & (1 << LK_CTL_DW1_NO_COMP_ACK_EN_OFF);
+    nocompack_req = req_val & (1 << LK_CTL_DW1_NO_COMP_ACK_EN_OFF);
+    if (nocompack_cur != nocompack_req)
+        printf("Setting nocompack for link to %d\n", nocompack_req);
+    if (nocompack_req)
+        new_val |= (1 << LK_CTL_DW1_NO_COMP_ACK_EN_OFF);
+
+    /* 3-bit max packet size field */
+    maxpkt_cur = (cur_val & LK_CTL_DW1_MAX_PKT_SIZE_EN_M) >>
+                 LK_CTL_DW1_MAX_PKT_SIZE_EN_OFF;
+    maxpkt_req = (req_val & LK_CTL_DW1_MAX_PKT_SIZE_EN_M) >>
+                 LK_CTL_DW1_MAX_PKT_SIZE_EN_OFF;
+    if (maxpkt_cur != maxpkt_req)
+        printf("Changing max packet size on link from %d to %d\n",
+               maxpkt_cur, maxpkt_req);
+    new_val |= maxpkt_req << LK_CTL_DW1_MAX_PKT_SIZE_EN_OFF;
+
+    addr_type_cur = cur_val & (1 << LK_CTL_DW1_LINK_ENT_ADDR_TYPE_OFF);
+    addr_type_req = req_val & (1 << LK_CTL_DW1_LINK_ENT_ADDR_TYPE_OFF);
+    if (addr_type_cur != addr_type_req)
+        printf("Link Entry Address Type changed to %d\n", addr_type_req);
+    if (addr_type_req)
+        new_val |= (1 << LK_CTL_DW1_LINK_ENT_ADDR_TYPE_OFF);
+
+    pci_set_long(pci_dev->config + offset, new_val);
+}
+
+/* LinkMaxCreditCntl DW2 write handler — value accepted verbatim (TODO:
+ * validate against the link capability limits). */
+static void lk_ctl_dw2_set(PCIDevice *pci_dev, CCIXState *s, uint16_t offset,
+                           uint16_t cap_start, uint32_t req_val)
+{
+    pci_set_long(pci_dev->config + offset, req_val);
+}
+
+/* LinkMaxCreditCntl DW3 write handler — value accepted verbatim (TODO:
+ * validate against the link capability limits). */
+static void lk_ctl_dw3_set(PCIDevice *pci_dev, CCIXState *s, uint16_t offset,
+                           uint16_t cap_start, uint32_t req_val)
+{
+    pci_set_long(pci_dev->config + offset, req_val);
+}
+
+/* LinkMaxCreditCntl DW4 write handler — value accepted verbatim (TODO:
+ * validate against the link capability limits). */
+static void lk_ctl_dw4_set(PCIDevice *pci_dev, CCIXState *s, uint16_t offset,
+                           uint16_t cap_start, uint32_t req_val)
+{
+    pci_set_long(pci_dev->config + offset, req_val);
+}
+
+/* Write handler for one IDM table entry: stored as written. */
+static void idm_entry_set(PCIDevice *pci_dev, CCIXState *s, uint16_t offset,
+                          uint16_t cap_start, uint32_t req_val)
+{
+    /* No sanity checking */
+    printf("Setting idm entry\n");
+    pci_set_long(pci_dev->config + offset, req_val);
+}
+
+/* Write handler for PSAM entry dword 0: stored as written. */
+static void psam_entry_dw0_set(PCIDevice *pci_dev, CCIXState *s, uint16_t offset,
+                               uint16_t cap_start, uint32_t req_val)
+{
+    /* No sanity checking */
+    printf("Setting psam entry dw0\n");
+    pci_set_long(pci_dev->config + offset, req_val);
+}
+
+/* Write handler for PSAM entry dword 1: stored as written. */
+static void psam_entry_dw1_set(PCIDevice *pci_dev, CCIXState *s, uint16_t offset,
+                               uint16_t cap_start, uint32_t req_val)
+{
+    /* No sanity checking */
+    printf("Setting psam entry dw1\n");
+    pci_set_long(pci_dev->config + offset, req_val);
+}
+
+/* Write handler for PSAM entry dword 2: stored as written. */
+static void psam_entry_dw2_set(PCIDevice *pci_dev, CCIXState *s, uint16_t offset,
+                               uint16_t cap_start, uint32_t req_val)
+{
+    /* No sanity checking */
+    printf("Setting psam entry dw2\n");
+    pci_set_long(pci_dev->config + offset, req_val);
+}
+
+
+/* Write handler for SAM entry dword 0: stored as written. */
+static void sam_entry_dw0_set(PCIDevice *pci_dev, CCIXState *s, uint16_t offset,
+                              uint16_t cap_start, uint32_t req_val)
+{
+    /* No sanity checking */
+    printf("Setting sam entry dw0\n");
+    pci_set_long(pci_dev->config + offset, req_val);
+}
+
+/* Write handler for SAM entry dword 1: stored as written. */
+static void sam_entry_dw1_set(PCIDevice *pci_dev, CCIXState *s, uint16_t offset,
+                              uint16_t cap_start, uint32_t req_val)
+{
+    /* No sanity checking */
+    printf("Setting sam entry dw1\n");
+    pci_set_long(pci_dev->config + offset, req_val);
+}
+
+/* Write handler for SAM entry dword 2: stored as written. */
+static void sam_entry_dw2_set(PCIDevice *pci_dev, CCIXState *s, uint16_t offset,
+                              uint16_t cap_start, uint32_t req_val)
+{
+    /* No sanity checking */
+    printf("Setting sam entry dw2\n");
+    pci_set_long(pci_dev->config + offset, req_val);
+}
+
+/*
+ * Emit the CCIX PRL Common capability structure at @this_cap_offset.
+ * The *_offset parameters are dword offsets packed into bits 31:20 of
+ * their respective status dwords; a zero sw_portal_offset also clears
+ * the "SW portal capable" bit in ComnCapStat2.
+ *
+ * NOTE(review): dwords are stored through a host-endian uint32_t *;
+ * PCI config space is little-endian, so this is only correct on
+ * little-endian hosts — confirm whether pci_set_long() should be used.
+ */
+static void ccix_prl_common_cap(PCIDevice *pci_dev,
+                                CCIXState *s,
+                                uint16_t this_cap_offset,
+                                uint16_t next_cap_offset,
+                                uint16_t device_err_log_offset,
+                                uint16_t idm_table_offset,
+                                uint16_t rsam_offset, uint16_t rsam_size,
+                                uint16_t hsam_offset, uint16_t hsam_size,
+                                uint16_t sr_table_offset,
+                                uint16_t sw_portal_offset)
+{
+    uint32_t *cap_start = (uint32_t *)(pci_dev->config + this_cap_offset);
+    uint32_t multiport_dev_cap;
+
+    /* Put in our first capability */
+    ccix_fill_cap_header(pci_dev, this_cap_offset, CCIX_COMP_ID_PRL_COMMON,
+                         next_cap_offset);
+    /* ComnCapStat1: advertise primary-port capability when flagged */
+    if (s->flags & (1 << PRIMARY_PORT_BIT))
+        multiport_dev_cap = 0x7;
+    else
+        multiport_dev_cap = 0x5;
+
+    *(cap_start + 1) = (multiport_dev_cap << CM_CAP_DW1_MULTIPORT_CAP_OFF) |
+        (0 << CM_CAP_DW1_VERSION_CAP_OFF) |
+        (0 << CM_CAP_DW1_DEVID_OFF);
+    /* ComnCapStat2 */
+    *(cap_start + 2) = (1 << CM_CAP_DW2_DISC_READY_CAP_OFF) |
+        (0 << CM_CAP_DW2_PART_CS_CAP_OFF) |
+        (0 << CM_CAP_DW2_PORT_AG_CAP_OFF) |
+        /* 64 byte only */
+        (0 << CM_CAP_DW2_CL_SIZE_CAP_OFF) |
+        /* 48 bit addressing only */
+        (0 << CM_CAP_DW2_ADDR_W_CAP_OFF) |
+        /* No multihop port aggregation */
+        (0 << CM_CAP_DW2_MH_CAP_OFF) |
+        ((sw_portal_offset ? 1 : 0) << CM_CAP_DW2_SW_PORT_CAP_OFF) |
+        /* Natural alignment over 4GB */
+        (1 << CM_CAP_DW2_SAM_ALIGN_CAP_OFF) |
+        /* random time values */
+        (3 << CM_CAP_DW2_READY_TIME_VAL_OFF) |
+        (1 << CM_CAP_DW2_READY_TIME_SCALE_OFF);
+    /* ComnCapStat3 is reserved */
+    *(cap_start + 3) = 0;
+    /* Device Error Log Offset */
+    *(cap_start + 4) = device_err_log_offset << CM_CAP_DW4_DEV_ERR_LOG_OFFSET_OFF;
+    *(cap_start + 5) = idm_table_offset << CM_CAP_DW5_IDM_OFFSET_OFF;
+    *(cap_start + 6) = (rsam_size << CM_CAP_DW6_RSAM_SIZE_OFF) |
+        (rsam_offset << CM_CAP_DW6_RSAM_OFFSET_OFF);
+    *(cap_start + 7) = (hsam_size << CM_CAP_DW7_HSAM_SIZE_OFF) |
+        (hsam_offset << CM_CAP_DW7_HSAM_OFFSET_OFF);
+    *(cap_start + 8) = sr_table_offset << CM_CAP_DW8_SR_OFFSET_OFF;
+    *(cap_start + 9) = sw_portal_offset << CM_CAP_DW9_SW_PORTAL_OFF;
+}
+
+/*
+ * Emit the Request Agent capability structure at @this_cap_offset:
+ * discovery-ready status, cache-flush timing and the dword offset
+ * (bits 31:20) of the RA error log.
+ */
+static void ccix_prl_ra_cap(PCIDevice *pci_dev,
+                            uint16_t this_cap_offset,
+                            uint16_t next_cap_offset,
+                            uint16_t error_log_offset)
+{
+    uint32_t *cap_start = (uint32_t *)(pci_dev->config + this_cap_offset);
+
+    ccix_fill_cap_header(pci_dev, this_cap_offset, CCIX_COMP_ID_RA, next_cap_offset);
+    *(cap_start + 1) = (1 << RA_CAP_DW1_RA_DISC_RDY_STAT_OFF) |
+        /* Some example values follow */
+        (3 << RA_CAP_DW1_RA_CACHE_FLUSH_TIME_VALUE_OFF) |
+        (1 << RA_CAP_DW1_RA_CACHE_FLUSH_TIME_SCALE_OFF) |
+        (0 << RA_CAP_DW1_RA_CACHE_FLUSH_STA_OFF);
+
+    *(cap_start + 2) = (error_log_offset << RA_CAP_DW2_RA_ERROR_LOG_OFFSET_OFF);
+}
+
+/*
+ * Emit the Home Agent capability structure at @this_cap_offset,
+ * followed by @num_pools memory-pool capability entries (2 dwords
+ * each, starting at dword 3).  @num_ids is the number of HA IDs
+ * (encoded as num_ids - 1 in the dword).
+ */
+static void ccix_prl_ha_cap(PCIDevice *pci_dev,
+                            uint16_t this_cap_offset,
+                            uint16_t next_cap_offset,
+                            uint16_t error_log_offset,
+                            uint8_t num_ids,
+                            uint8_t num_pools)
+{
+    uint32_t *cap_start = (uint32_t *)(pci_dev->config + this_cap_offset);
+    int i;
+
+    ccix_fill_cap_header(pci_dev, this_cap_offset, CCIX_COMP_ID_HA, next_cap_offset);
+    *(cap_start + 1) = (1 << HA_CAP_DW1_HA_DISC_RD_STAT_OFF) |
+        ((num_ids - 1) << HA_CAP_DW1_NUM_HA_IDS_OFF) |
+        (num_pools << HA_CAP_DW1_HA_MEM_POOL_CAP_OFF) |
+        (0 << HA_CAP_DW1_HA_QACK_OFF) |
+        (0 << HA_CAP_DW1_HA_HW_QACK_CAP_OFF) |
+        (0 << HA_CAP_DW1_HA_MEM_EXP_CAP_OFF) | /* No support for SAs being homed here */
+        (0 << HA_CAP_DW1_HA_EVICT_HINT_CAP_OFF) |
+        (0 << HA_CAP_DW1_HA_WRITE_EVICT_FULL_HIT_CAP_OFF) |
+        (3 << HA_CAP_DW1_MEM_POOL_READY_TIME_VALUE_OFF) |
+        (4 << HA_CAP_DW1_MEM_POOL_READY_TIME_SCALE_OFF) |
+        (1 << HA_CAP_DW1_MEM_POOL_READY_STA_OFF);
+    *(cap_start + 2) = error_log_offset << HA_CAP_DW2_HA_ERROR_LOG_OFF;
+
+    /* Each pool: ready, volatile HBM, normal memory, example sizes. */
+    for (i = 0; i < num_pools; i++) {
+        *(cap_start + 3 + i * 2) = (1 << MEM_POOL_CAP_DW1_READY_STA_OFF) |
+            (mem_type_volatile << MEM_POOL_CAP_DW1_GEN_MEM_TYPE_OFF) |
+            (mem_spec_hbm << MEM_POOL_CAP_DW1_SPEC_MEM_TYPE_OFF) |
+            (0 << MEM_POOL_CAP_DW1_ADDR_CAP_OFF) |
+            (MEM_POOL_CAP_DW1_MEM_ATTR_NORMAL << MEM_POOL_CAP_DW1_MEM_ATTR_OFF) |
+            (MEM_POOL_CAP_DW1_MEM_EXT_ATTR_NONE << MEM_POOL_CAP_DW1_MEM_EXT_ATTR_OFF) |
+            (0xFFFF << MEM_POOL_CAP_DW1_MEM_POOL_SIZE_L_CAP_OFF);
+        *(cap_start + 3 + i * 2 + 1) = 0xF << MEM_POOL_CAP_DW2_MEM_POOL_SIZE_H_CAP_OFF;
+    }
+}
+
+/*
+ * Emit the CCIX Port capability structure at @this_cap_offset.
+ * Advertises discovery-ready, optimized-header support, the number of
+ * links and PSAM entries from @s, the port ID, and the error log
+ * dword offset (bits 31:20 of the final dword).
+ */
+static void ccix_prl_port_cap(PCIDevice *pci_dev,
+                              CCIXState *s,
+                              uint16_t this_cap_offset,
+                              uint16_t next_cap_offset,
+                              uint16_t error_log_offset)
+{
+    uint32_t *cap_start = (uint32_t *)(pci_dev->config + this_cap_offset);
+
+    ccix_fill_cap_header(pci_dev, this_cap_offset, CCIX_COMP_ID_PRL_PORT,
+                         next_cap_offset);
+    /*
+     * PortID is a 5-bit field (PT_CAP_DW1_PORTID_M = 0xF8000000); the
+     * previous 0xf mask truncated it to 4 bits, corrupting IDs >= 16.
+     * The uint32_t cast keeps the shift into bit 31 well defined.
+     */
+    *(cap_start + 1) = (1 << PT_CAP_DW1_DISC_READY_OFF) |
+        (1 << PT_CAP_DW1_OPT_HEADER_OFF) |
+        (0 << PT_CAP_DW1_P2P_FORWARDING_OFF) |
+        (s->num_links << PT_CAP_DW1_LINKS_OFF) |
+        (s->psam_entries << PT_CAP_DW1_PSAM_ENTRIES_OFF) |
+        ((uint32_t)(s->port_id & 0x1f) << PT_CAP_DW1_PORTID_OFF);
+    *(cap_start + 2) = 0; /* No Port AG */
+    *(cap_start + 3) = 0; /* No Port FW */
+    *(cap_start + 4) = error_log_offset << 20;
+}
+
+/*
+ * Emit the CCIX Link capability structure at @this_cap_offset.
+ * The credit/request limits below are placeholder example values
+ * (3..10), not derived from any real hardware model.
+ */
+static void ccix_prl_link_cap(PCIDevice *pci_dev,
+                              uint16_t this_cap_offset,
+                              uint16_t next_cap_offset,
+                              uint16_t error_log_offset)
+{
+    uint32_t *cap_start = (uint32_t *)(pci_dev->config + this_cap_offset);
+
+    ccix_fill_cap_header(pci_dev, this_cap_offset, CCIX_COMP_ID_PRL_LINK,
+                         next_cap_offset);
+
+    *(cap_start + 1) = (1 << LK_CAP_DW1_DISC_READY_OFF) |
+        (0 << LK_CAP_DW1_CREDIT_TYPE_OFF) |
+        (1 << LK_CAP_DW1_MESSAGE_PACKING_OFF) |
+        (0 << LK_CAP_DW1_NOCOMPACK_OFF) |
+        (2 << LK_CAP_DW1_MAX_PKT_SIZE_OFF);
+
+    *(cap_start + 2) = (3 << LK_CAP_DW2_MAX_MEM_REQ_SEND_OFF) |
+        (4 << LK_CAP_DW2_MAX_SNP_REQ_SEND_OFF) |
+        (5 << LK_CAP_DW2_MAX_DAT_REQ_SEND_OFF);
+    *(cap_start + 3) = (6 << LK_CAP_DW3_MAX_MEM_REQ_RECV_OFF) |
+        (7 << LK_CAP_DW3_MAX_SNP_REQ_RECV_OFF) |
+        (8 << LK_CAP_DW3_MAX_DAT_REQ_RECV_OFF);
+    *(cap_start + 4) = (9 << LK_CAP_DW4_MAX_MISC_REQ_SEND_CAP_OFF) |
+        (10 << LK_CAP_DW4_MAX_MISC_REQ_RECV_CAP_OFF);
+    /* Error log dword offset lives in bits 31:20 of the last dword. */
+    *(cap_start + 5) = error_log_offset << 20;
+}
+
+/*
+ * Emit the PRL Common control structure at @this_ctl_offset with all
+ * fields cleared, register write handlers for ComnCntl1/ComnCntl2,
+ * and record the ComnCntl1 offset in s->enable_offset so sibling
+ * functions can mirror enable/DEVID writes into it.
+ */
+static void ccix_prl_common_ctl(PCIDevice *pci_dev,
+                                CCIXState *s,
+                                uint16_t this_ctl_offset,
+                                uint16_t next_ctl_offset)
+{
+    uint32_t *ctl_start = (uint32_t *)(pci_dev->config + this_ctl_offset);
+
+    ccix_fill_cap_header(pci_dev, this_ctl_offset, CCIX_COMP_ID_PRL_COMMON,
+                         next_ctl_offset);
+    /* ComnCntl1: everything disabled at reset */
+    *(ctl_start + 1) = (0 << CM_CTL_DW1_DEVICE_EN_OFF) |
+        (0 << CM_CTL_DW1_PRIMARY_PORT_EN_OFF) |
+        (0 << CM_CTL_DW1_MESH_EN_OFF) |
+        (0 << CM_CTL_DW1_PORT_AG_EN_OFF) |
+        (0 << CM_CTL_DW1_IDM_TABLE_VALID_OFF) |
+        (0 << CM_CTL_DW1_RSAM_TABLE_VALID_OFF) |
+        (0 << CM_CTL_DW1_HSAM_TABLE_VALID_OFF) |
+        (0 << CM_CTL_DW1_SW_PORT_ENABLE_OFF) |
+        (0 << CM_CTL_DW1_ERR_AGENT_ID_OFF) |
+        (0 << CM_CTL_DW1_DEVID_OFF);
+    am_table_add(pci_dev, s, this_ctl_offset + 4, this_ctl_offset, comncntl1_set);
+    s->enable_offset = this_ctl_offset + 4;
+    /* ComnCtl2 */
+    *(ctl_start + 2) = 0; // No support for anything in here yet.
+    am_table_add(pci_dev, s, this_ctl_offset + 8, this_ctl_offset, comncntl2_set);
+    /* No snoop request hash mask yet */
+    /* No SW Service Portal yet */
+}
+
+/*
+ * Emit the Request Agent control structure at @this_ctl_offset with
+ * all fields cleared and register the RACntl DW1 write handler.
+ * NOTE(review): @ra_id is currently unused — presumably the reset
+ * RAID should be seeded from it; confirm against the caller.
+ */
+static void ccix_prl_ra_ctl(PCIDevice *pci_dev,
+                            CCIXState *s,
+                            uint16_t this_ctl_offset,
+                            uint16_t next_ctl_offset,
+                            uint16_t ra_id)
+{
+    uint32_t *ctl_start = (uint32_t *)(pci_dev->config + this_ctl_offset);
+
+    ccix_fill_cap_header(pci_dev, this_ctl_offset, CCIX_COMP_ID_RA,
+                         next_ctl_offset);
+
+    *(ctl_start + 1) = (0 << RA_CTL_DW1_EN_OFF) |
+        (0 << RA_CTL_DW1_SNOOP_RESP_EN_OFF) |
+        (0 << RA_CTL_DW1_CACHE_FLUSH_EN_OFF) |
+        (0 << RA_CTL_DW1_CACHE_EN_OFF) |
+        (0 << RA_CTL_DW1_RAID_OFF);
+    am_table_add(pci_dev, s, this_ctl_offset + 4, this_ctl_offset, ra_ctl_dw1_set);
+}
+
+/*
+ * Emit the Home Agent control structure at @this_ctl_offset.
+ * Only the header and a zeroed first control dword are written so
+ * far; no write handlers are registered yet.
+ * NOTE(review): @num_ids and @num_pools are currently unused —
+ * presumably reserved for the HAID/HBAT entry setup; confirm.
+ */
+static void ccix_prl_ha_ctl(PCIDevice *pci_dev,
+                            CCIXState *s,
+                            uint16_t this_ctl_offset,
+                            uint16_t next_ctl_offset,
+                            uint16_t num_ids,
+                            uint16_t num_pools)
+{
+    uint32_t *ctl_start = (uint32_t *)(pci_dev->config + this_ctl_offset);
+
+    ccix_fill_cap_header(pci_dev, this_ctl_offset, CCIX_COMP_ID_HA,
+                         next_ctl_offset);
+    *(ctl_start + 1) = 0;
+}
+
+/*
+ * Emit the Port control structure at @this_ctl_offset, register the
+ * Port Control DW1 handler, and register handlers for each 3-dword
+ * PSAM entry.  The PSAM table starts at byte 0x14, i.e. straight
+ * after the fixed part (CCIX_PORT_CTL_SIZE = 5 dwords).
+ */
+static void ccix_prl_port_ctl(PCIDevice *pci_dev,
+                              CCIXState *s,
+                              uint16_t this_ctl_offset,
+                              uint16_t next_ctl_offset,
+                              uint8_t psam_table_entries)
+{
+    uint8_t i;
+    uint32_t *ctl_start = (uint32_t *)(pci_dev->config + this_ctl_offset);
+
+    ccix_fill_cap_header(pci_dev, this_ctl_offset, CCIX_COMP_ID_PRL_PORT,
+                         next_ctl_offset);
+    /* Port Control */
+    *(ctl_start + 1) = 0;
+    am_table_add(pci_dev, s, this_ctl_offset + 4, this_ctl_offset, port_ctl_dw1_set);
+
+    for (i = 0; i < psam_table_entries; i++) {
+        am_table_add(pci_dev, s, this_ctl_offset + 0x14 + (3 * i + 0) * 4,
+                     0, psam_entry_dw0_set);
+        am_table_add(pci_dev, s, this_ctl_offset + 0x14 + (3 * i + 1) * 4,
+                     0, psam_entry_dw1_set);
+        am_table_add(pci_dev, s, this_ctl_offset + 0x14 + (3 * i + 2) * 4,
+                     0, psam_entry_dw2_set);
+    }
+}
+
+static void ccix_prl_link_ctl(PCIDevice *pci_dev,
+ CCIXState *s,
+ uint16_t this_ctl_offset,
+ uint16_t next_ctl_offset,
+ uint8_t num_links)
+{
+ uint32_t *ctl_start = (uint32_t *)(pci_dev->config + this_ctl_offset);
+ uint8_t i;
+
+ ccix_fill_cap_header(pci_dev, this_ctl_offset, CCIX_COMP_ID_PRL_LINK,
+ next_ctl_offset);
+
+ for (i = 0; i < num_links; i++) {
+ /* LinkAttrCntl for link i */
+ *(ctl_start + 1 + i * 6) = 0;
+ am_table_add(pci_dev, s, this_ctl_offset + 4 + i * 24, 0, lk_ctl_dw1_set);
+ /* LinkMaxCreditCntl for link i */
+ *(ctl_start + 1 + i * 6 + 1) = 0;
+ am_table_add(pci_dev, s, this_ctl_offset + 4 + i * 24 + 4, 0, lk_ctl_dw2_set);
+ *(ctl_start + 1 + i * 6 + 2) = 0;
+ am_table_add(pci_dev, s, this_ctl_offset + 4 + i * 24 + 8, 0, lk_ctl_dw3_set);
+ *(ctl_start + 1 + i * 6 + 3) = 0;
+ am_table_add(pci_dev, s, this_ctl_offset + 4 + i * 24 + 0xC, 0, lk_ctl_dw4_set);
+
+ /* error fields - to do */
+ *(ctl_start + 1 + i * 24 + 4) = 0;
+ *(ctl_start + 1 + i * 24 + 5) = 0;
+ }
+ /* transport id */
+ for (i = 0; i < num_links; i++)
+ //to check
+ *(ctl_start + 1 + num_links * 6 + 4 * i) = 0;
+}
+
+static void ccix_idm(PCIDevice *pci_dev, CCIXState *s, uint16_t offset)
+{
+ int i;
+
+ for (i = 0; i < 64; i++)
+ am_table_add(pci_dev, s, offset + i * 4, 0, idm_entry_set);
+}
+
+static void ccix_sam(PCIDevice *pci_dev, CCIXState *s, uint16_t offset, uint8_t entries)
+{
+ int i;
+
+ for (i = 0; i < entries; i++) {
+ am_table_add(pci_dev, s, offset + 3 * i * 4, 0, sam_entry_dw0_set);
+ am_table_add(pci_dev, s, offset + (3 * i + 1) * 4, 0, sam_entry_dw1_set);
+ am_table_add(pci_dev, s, offset + (3 * i + 2) * 4, 0, sam_entry_dw2_set);
+ }
+}
+static void ccix_guid(PCIDevice *pci_dev, uint16_t offset)
+{
+ uint32_t *start = (uint32_t *)(pci_dev->config + offset);
+
+ *(start) = CCIX_GUID_DW0;
+ *(start + 1) = CCIX_GUID_DW1;
+ *(start + 2) = CCIX_GUID_DW2;
+ *(start + 3) = CCIX_GUID_DW3;
+ *(start + 4) = 1 << CCIX_GUID_DW4_VERSION_OFF;
+}
+
+/*FIXME: Is this generic enough that we should put it in the PCIe core */
+
+#define CCIX_DVSEC_HEADER_SIZE 0x14
+#define PCIE_DVSEC_HEADER_OFFSET 0x4 /* Offset from start of extend cap */
+#define PCIE_DVSEC_ID_OFFSET 0x8
+static void pcie_add_dvsec(PCIDevice *pci_dev, uint16_t offset,
+ uint16_t size, uint16_t vendor_id, uint16_t dvsec_id,
+ uint16_t ccix_guid_offset)
+{
+ uint16_t *word_addr;
+
+ pcie_add_capability(pci_dev, PCI_EXT_CAP_ID_DVSEC, 1, offset, size);
+ pci_set_word(pci_dev->config + offset + PCIE_DVSEC_HEADER_OFFSET,
+ vendor_id);
+ word_addr = (uint16_t *)(pci_dev->config + offset + PCIE_DVSEC_HEADER_OFFSET + 2);
+ *word_addr = 0x1 | (size << 4);
+
+ word_addr = (uint16_t *)(pci_dev->config + offset + PCIE_DVSEC_ID_OFFSET);
+ *word_addr = dvsec_id;
+ word_addr = (uint16_t *)(pci_dev->config + offset + PCIE_DVSEC_ID_OFFSET + 2);
+ *word_addr = (ccix_guid_offset << 4);
+}
+
+void ccix_set_port(CCIXState *s)
+{
+ s->flags |= (1 << CCIX_IS_PORT);
+ /* Enforce rule of how many psam entries if links is greater than 1 */
+ if ((s->num_links > 1) &&
+ (s->psam_entries < s->num_links + 1)) {
+ printf("Increased psam entries to minimum allowed\n");
+ s->psam_entries = s->num_links + 1;
+ }
+}
+
/*
 * Types of block that can appear inside the CCIX PRL DVSEC.  Used to tag
 * entries in the layout lists built by ccix_add_prldvsec().
 */
enum ccix_entry_type {
    common_cap,  /* common capability structure */
    ra_cap,      /* request agent capability */
    ha_cap,      /* home agent capability */
    port_cap,    /* port capability */
    link_cap,    /* link capability */
    end_cap,
    common_ctl,  /* common control structure */
    ra_ctl,      /* request agent control */
    ha_ctl,      /* home agent control */
    port_ctl,    /* port control */
    link_ctl,    /* link control */
    end_ctl,
    idm_table,   /* 64 entry ID map table */
    sr_table,    /* snoop request table */
    rsam,        /* requester system address map */
    hsam,        /* home system address map */
    guid,        /* CCIX GUID block */
};
+
/*
 * Descriptor for one DVSEC block; used only while computing the config
 * space layout.  The offset is filled in by the layout passes.
 */
struct ccix_cap {
    uint16_t size;             /* size of the block in bytes */
    uint16_t offset;           /* assigned config space offset */
    enum ccix_entry_type type;
};
+
+static void ccix_add_cap(GList **l, enum ccix_entry_type type, uint16_t size)
+{
+ struct ccix_cap *c;
+
+ c = malloc(sizeof(*c));
+ c->type = type;
+ c->size = size;
+ *l = g_list_append(*l, c);
+}
+
+uint16_t ccix_add_prldvsec(PCIDevice *pci_dev, CCIXState *s, uint16_t offset)
+{
+ /* Runtime configuration of the following is not yet implemented */
+ uint16_t ha_num_pools = 2;
+ uint16_t ha_num_ids = 8;
+ /* Error log not yet implemented */
+ uint16_t error_log_offset = 0;
+ uint16_t idm_offset = 0;
+ uint16_t sr_offset = 0;
+ uint16_t rsam_offset = 0;
+ uint16_t hsam_offset = 0;
+ uint16_t guid_offset = 0;
+ uint16_t max_offset = 0;
+ uint16_t next_offset;
+ uint16_t cap_size = 0;
+ uint16_t cap_offset;
+ uint16_t ctl_size = 0;
+ uint16_t ctl_offset;
+ struct ccix_cap *c;
+ GList *li;
+ GList *cap_list = NULL;
+ GList *ctl_list = NULL;
+ GList *other_list = NULL;
+ int i;
+
+ /*
+ * Build up lists of the CCIX capabilities, controls and other blocks so that we
+ * can work out their layout in config space before writing the header.
+ */
+ ccix_add_cap(&cap_list, common_cap, CCIX_COMMON_CAP_MAX_SIZE);
+ if (s->flags & (1 << PRIMARY_PORT_BIT))
+ ccix_add_cap(&ctl_list, common_ctl, 0x10);
+ for (i = 0; i < s->num_ras; i++) {
+ ccix_add_cap(&cap_list, ra_cap, CCIX_RA_CAP_SIZE);
+ ccix_add_cap(&ctl_list, ra_ctl, CCIX_RA_CTL_SIZE);
+ }
+ for (i = 0; i < s->num_has; i++) {
+ ccix_add_cap(&cap_list, ha_cap, CCIX_HA_CAP_SIZE + CCIX_POOL_CAP_SIZE * ha_num_pools);
+ ccix_add_cap(&ctl_list, ha_ctl,
+ CCIX_HA_CTL_SIZE +
+ (ha_num_ids + 3) / 4 * CCIX_HAID_ENTRY_SIZE +
+ ha_num_pools * CCIX_BAT_ENTRY_SIZE);
+ }
+
+ /* Only functions with CCIX ports may have port and link structures */
+ if (s->flags & (1 << CCIX_IS_PORT)) {
+ ccix_add_cap(&cap_list, port_cap, CCIX_PORT_CAP_SIZE);
+ ccix_add_cap(&ctl_list, port_ctl,
+ CCIX_PORT_CTL_SIZE + 12 * s->psam_entries);
+ ccix_add_cap(&cap_list, link_cap, CCIX_LINK_CAP_SIZE);
+ ccix_add_cap(&ctl_list, link_ctl,
+ CCIX_LINK_CTL_SIZE + CCIX_LINK_CTL_PER_LINK_SIZE * s->num_links);
+ }
+
+ if (s->flags & (1 << PRIMARY_PORT_BIT)) {
+ ccix_add_cap(&other_list, idm_table, 64 * 4);
+ if (s->rsam_entries)
+ ccix_add_cap(&other_list, rsam, CCIX_SAM_ENTRY_SIZE * s->rsam_entries);
+ if (s->hsam_entries)
+ ccix_add_cap(&other_list, hsam, CCIX_SAM_ENTRY_SIZE * s->hsam_entries);
+ ccix_add_cap(&other_list, guid, CCIX_GUID_SIZE);
+ }
+
+ cap_offset = offset + CCIX_DVSEC_HEADER_SIZE;
+ next_offset = cap_offset;
+ for (li = cap_list; li != NULL; li = li->next) {
+ c = (struct ccix_cap *)li->data;
+ c->offset = next_offset;
+ if (li->next == NULL)
+ next_offset = 0;
+ else {
+ next_offset = c->offset + c->size;
+ }
+ cap_size += c->size;
+ max_offset = c->offset + c->size;
+ }
+
+ next_offset = max_offset;
+ ctl_offset = max_offset;
+ for (li = ctl_list; li != NULL; li = li->next) {
+ c = (struct ccix_cap *)li->data;
+ c->offset = next_offset;
+ if (li->next == NULL)
+ next_offset = 0;
+ else
+ next_offset = c->offset + c->size;
+
+ max_offset = c->offset + c->size;
+ ctl_size += c->size;
+ }
+
+ next_offset = max_offset;
+ for (li = other_list; li != NULL; li = li->next) {
+ c = (struct ccix_cap *)li->data;
+ c->offset = next_offset;
+ switch (c->type) {
+ case
+ idm_table: idm_offset = c->offset;
+ break;
+ case sr_table:
+ sr_offset = c->offset;
+ break;
+ case hsam:
+ hsam_offset = c->offset;
+ break;
+ case rsam:
+ rsam_offset = c->offset;
+ break;
+ case guid:
+ guid_offset = c->offset;
+ break;
+ default:
+ break;
+ }
+ if (li->next == NULL)
+ next_offset = 0;
+ else
+ next_offset = c->offset + c->size;
+ max_offset = c->offset + c->size;
+ }
+
+ /* Will eventually take some description and make a prl. For now one RA */
+ pcie_add_dvsec(pci_dev, offset, max_offset - offset, PCI_VENDOR_ID_CCIX,
+ CCIX_COMP_ID_PRL_DVSEC, guid_offset);
+
+ ccix_dvsec_fill_dvsec_header(pci_dev, offset, cap_size, cap_offset,
+ ctl_size, ctl_offset);
+
+ for (li = cap_list; li != NULL; li = li->next) {
+ c = (struct ccix_cap *)li->data;
+ if (li->next)
+ next_offset = ((struct ccix_cap *)(li->next->data))->offset;
+ else
+ next_offset = 0;
+ switch (c->type) {
+ case common_cap:
+ ccix_prl_common_cap(pci_dev, s, c->offset, next_offset,
+ 0, /* No support for device error log */
+ idm_offset,
+ rsam_offset, CCIX_SAM_ENTRY_SIZE * s->rsam_entries,
+ hsam_offset, CCIX_SAM_ENTRY_SIZE * s->hsam_entries,
+ sr_offset,
+ 0 /* No support for sw portal */);
+ break;
+ case ra_cap:
+ ccix_prl_ra_cap(pci_dev, c->offset, next_offset, error_log_offset);
+ break;
+ case ha_cap:
+ ccix_prl_ha_cap(pci_dev, c->offset, next_offset, error_log_offset,
+ ha_num_ids, ha_num_pools);
+ break;
+ case port_cap:
+ ccix_prl_port_cap(pci_dev, s, c->offset, next_offset, error_log_offset);
+ break;
+ case link_cap:
+ ccix_prl_link_cap(pci_dev, c->offset, next_offset, error_log_offset);
+ break;
+ default:
+ break;
+ }
+ }
+
+ for (li = ctl_list; li != NULL; li = li->next) {
+ c = (struct ccix_cap *)li->data;
+ if (li->next)
+ next_offset = ((struct ccix_cap *)(li->next->data))->offset;
+ else
+ next_offset =0;
+ switch (c->type) {
+ case common_ctl:
+ ccix_prl_common_ctl(pci_dev, s, c->offset, next_offset);
+ break;
+ case ra_ctl:
+ ccix_prl_ra_ctl(pci_dev, s, c->offset, next_offset, 0);
+ break;
+ case ha_ctl:
+ ccix_prl_ha_ctl(pci_dev, s, c->offset, next_offset, ha_num_ids, ha_num_pools);
+ break;
+ case port_ctl:
+ ccix_prl_port_ctl(pci_dev, s, c->offset, next_offset, s->psam_entries);
+ break;
+ case link_ctl:
+ ccix_prl_link_ctl(pci_dev, s, c->offset, next_offset, s->num_links);
+ break;
+ default:
+ break;
+ }
+ }
+
+ for (li = other_list; li != NULL; li = li->next) {
+ c = (struct ccix_cap *)li->data;
+ switch(c->type) {
+ case idm_table:
+ ccix_idm(pci_dev, s, c->offset);
+ break;
+ case sr_table:
+ ccix_idm(pci_dev, s, c->offset);
+ break;
+ case rsam:
+ ccix_sam(pci_dev, s, c->offset, s->rsam_entries);
+ break;
+ case hsam:
+ ccix_sam(pci_dev, s, c->offset, s->hsam_entries);
+ break;
+ case guid:
+ ccix_guid(pci_dev, c->offset);
+ break;
+ default:
+ break;
+ }
+ }
+ return max_offset;
+}
+
+uint16_t ccix_add_tdldvsec(PCIDevice *pci_dev, uint16_t offset)
+{
+ const uint16_t dvsec_size = 17 * sizeof(uint32_t);
+
+ pcie_add_dvsec(pci_dev, offset, dvsec_size, PCI_VENDOR_ID_CCIX,
+ CCIX_COMP_ID_TDL_DVSEC, 0);
+
+ return offset + dvsec_size;
+}
+
+void ccix_register(CCIXState *s)
+{
+ int i;
+
+ if (s->ccix_dev_name)
+ for (i = 0; i < sizeof(CCIXFuncs)/sizeof(*CCIXFuncs); i++)
+ if (!CCIXFuncs[i]) {
+ CCIXFuncs[i] = s;
+ break;
+ }
+}
new file mode 100644
@@ -0,0 +1,28 @@
+
+#include "hw/pci/pcie_port.h"
/*
 * Per-function CCIX state used while building and operating the CCIX
 * DVSEC structures in PCIe config space.
 */
typedef struct CCIXState {
    GTree *am_tree;   /* registered writable config dwords (am_table_add) */
#define PRIMARY_PORT_BIT 0
#define CCIX_IS_PORT 1
    uint32_t flags;           /* PRIMARY_PORT_BIT / CCIX_IS_PORT bits */
    char *ccix_dev_name;      /* NULL means do not register this function */
    uint8_t port_id;
    uint8_t num_links;        /* links exposed by the port */
    uint8_t psam_entries;     /* PSAM table entries in the port control */
    uint8_t num_ras;          /* request agents on this function */
    uint8_t num_has;          /* home agents on this function */
    uint8_t rsam_entries;     /* RSAM table entries (0 = no table) */
    uint8_t hsam_entries;     /* HSAM table entries (0 = no table) */
    PCIDevice *pci_dev;
    uint16_t enable_offset;   /* config offset of ComnCntl1 enable dword */
} CCIXState;
+
/* Global registry of named CCIX functions, filled by ccix_register() */
extern CCIXState *CCIXFuncs[256];

/* Build the PRL DVSEC at @offset; returns offset of the following byte */
uint16_t ccix_add_prldvsec(PCIDevice *pci_dev, CCIXState *s, uint16_t offset);
/* Config write hook routing writes through the registered am_tree entries */
void ccix_write_config(PCIDevice *pci_dev, CCIXState *s, uint32_t addr, uint32_t val_in, int l);
/* Build the TDL DVSEC at @offset; returns offset of the following byte */
uint16_t ccix_add_tdldvsec(PCIDevice *pci_dev, uint16_t offset);
void initialize_ccixstate(CCIXState *s, PCIDevice *pci_dev);
/* Add @s to the global CCIXFuncs table if it has a device name */
void ccix_register(CCIXState *s);

/* Mark @s as a CCIX port and enforce minimum PSAM entries */
void ccix_set_port(CCIXState *s);
@@ -225,6 +225,8 @@
#define PCI_DEVICE_ID_HUAWEI_CCIX_DOWN 0xA261
#define PCI_DEVICE_ID_HUAWEI_CCIX_EP 0xA262
+#define PCI_VENDOR_ID_CCIX 0x1e2c
+
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_82378 0x0484
#define PCI_DEVICE_ID_INTEL_82441 0x1237
The nature of the complex topologies supported by CCIX means that it will be some time before it is possible to construct many of the interesting cases in hardware, and it will be extremely hard to exercise all of the combinations whilst developing firmware and drivers. To that end, the intent of this library and the following device emulations is to allow the construction of complex CCIX topologies by overlaying them on PCIe. The CCIX topologies are configured through CCIX-specific PCIe DVSEC capability and control structures. A typical mesh-capable CCIX device will overlay onto N upstream PCIe switch ports, M downstream PCIe switch ports, function 0 EPs which are not PCIe switch upstream ports, P functions 1+ which have additional CCIX protocol elements described, and Q functions 1+ which represent acceleration functions with no CCIX protocol elements (these look to be normal PCIe functions, so are not covered by this patch set). Signed-off-by: Jonathan Cameron <Jonathan.Cameron@huawei.com> --- hw/pci/Kconfig | 3 + hw/pci/Makefile.objs | 1 + hw/pci/ccix_lib.c | 1299 ++++++++++++++++++++++++++++++++++++++ include/hw/misc/ccix.h | 28 + include/hw/pci/pci_ids.h | 2 + 5 files changed, 1333 insertions(+) -- 2.20.1